pygresql-5.1.2/LICENSE.txt

Written by D'Arcy J.M. Cain (darcy@PyGreSQL.org)

Based heavily on code written by Pascal Andre (andre@chimay.via.ecp.fr)

Copyright (c) 1995, Pascal Andre

Further modifications copyright (c) 1997-2008 by D'Arcy J.M. Cain

Further modifications copyright (c) 2009-2020 by the PyGreSQL Development Team

PyGreSQL is released under the PostgreSQL License, a liberal Open Source
license, similar to the BSD or MIT licenses:

Permission to use, copy, modify, and distribute this software and its
documentation for any purpose, without fee, and without a written agreement
is hereby granted, provided that the above copyright notice and this
paragraph and the following two paragraphs appear in all copies.

In this license the term "AUTHORS" refers to anyone who has contributed code
to PyGreSQL.

IN NO EVENT SHALL THE AUTHORS BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT,
SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS,
ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF
AUTHORS HAVE BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

THE AUTHORS SPECIFICALLY DISCLAIM ANY WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS, AND THE
AUTHORS HAVE NO OBLIGATIONS TO PROVIDE MAINTENANCE, SUPPORT, UPDATES,
ENHANCEMENTS, OR MODIFICATIONS.

pygresql-5.1.2/MANIFEST.in

include *.c
include *.h
include *.py
include README.rst
include LICENSE.txt
include tox.ini
recursive-include tests *.py
exclude tests/LOCAL_PyGreSQL.py
include docs/Makefile
include docs/make.bat
include docs/*.py
include docs/*.rst
include docs/*.txt
exclude docs/index.rst
recursive-include docs/community *.rst
recursive-include docs/contents *.rst
recursive-include docs/download *.rst
recursive-include docs/_static *.css_t *.ico *.png
recursive-include docs/_templates *.html
pygresql-5.1.2/PKG-INFO

Metadata-Version: 2.1
Name: PyGreSQL
Version: 5.1.2
Summary: Python PostgreSQL Interfaces
Home-page: http://www.pygresql.org
Author: D'Arcy J. M. Cain
Author-email: darcy@PyGreSQL.org
License: PostgreSQL
Download-URL: http://www.pygresql.org/download/
Description: PyGreSQL is an open-source Python module that interfaces to a
        PostgreSQL database. It embeds the PostgreSQL query library to allow
        easy use of the powerful PostgreSQL features from a Python script.
Keywords: pygresql postgresql database api dbapi
Platform: any
Classifier: Development Status :: 6 - Mature
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: PostgreSQL License
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: C
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 2
Classifier: Programming Language :: Python :: 2.6
Classifier: Programming Language :: Python :: 2.7
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.3
Classifier: Programming Language :: Python :: 3.4
Classifier: Programming Language :: Python :: 3.5
Classifier: Programming Language :: Python :: 3.6
Classifier: Programming Language :: Python :: 3.7
Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: SQL
Classifier: Topic :: Database
Classifier: Topic :: Database :: Front-Ends
Classifier: Topic :: Software Development :: Libraries :: Python Modules
Description-Content-Type: text/plain

pygresql-5.1.2/PyGreSQL.egg-info/PKG-INFO

(identical to the top-level PKG-INFO above)

pygresql-5.1.2/PyGreSQL.egg-info/SOURCES.txt

LICENSE.txt
MANIFEST.in
README.rst
pg.py
pgconn.c
pgdb.py
pginternal.c
pglarge.c
pgmodule.c
pgnotice.c
pgquery.c
pgsource.c
pgtypes.h
py3c.h
setup.py
tox.ini
PyGreSQL.egg-info/PKG-INFO
PyGreSQL.egg-info/SOURCES.txt
PyGreSQL.egg-info/dependency_links.txt
PyGreSQL.egg-info/not-zip-safe
PyGreSQL.egg-info/top_level.txt
docs/Makefile
docs/about.rst
docs/about.txt
docs/announce.rst
docs/conf.py
docs/copyright.rst
docs/make.bat
docs/requirements.txt
docs/start.txt
docs/toc.txt
docs/_static/favicon.ico
docs/_static/pygresql.css_t
docs/_static/pygresql.png
docs/_templates/layout.html
docs/community/homes.rst
docs/community/index.rst
docs/community/issues.rst
docs/community/mailinglist.rst
docs/community/source.rst
docs/community/support.rst
docs/contents/changelog.rst
docs/contents/examples.rst
docs/contents/general.rst
docs/contents/index.rst
docs/contents/install.rst
docs/contents/tutorial.rst
docs/contents/pg/adaptation.rst
docs/contents/pg/connection.rst
docs/contents/pg/db_types.rst
docs/contents/pg/db_wrapper.rst
docs/contents/pg/index.rst
docs/contents/pg/introduction.rst
docs/contents/pg/large_objects.rst
docs/contents/pg/module.rst
docs/contents/pg/notification.rst
docs/contents/pg/query.rst
docs/contents/pgdb/adaptation.rst
docs/contents/pgdb/connection.rst
docs/contents/pgdb/cursor.rst
docs/contents/pgdb/index.rst
docs/contents/pgdb/introduction.rst
docs/contents/pgdb/module.rst
docs/contents/pgdb/typecache.rst
docs/contents/pgdb/types.rst
docs/contents/postgres/advanced.rst
docs/contents/postgres/basic.rst
docs/contents/postgres/func.rst
docs/contents/postgres/index.rst
docs/contents/postgres/syscat.rst
docs/download/download.rst
docs/download/files.rst
docs/download/index.rst
tests/__init__.py
tests/dbapi20.py
tests/test_classic.py
tests/test_classic_connection.py
tests/test_classic_dbwrapper.py
tests/test_classic_functions.py
tests/test_classic_largeobj.py
tests/test_classic_notification.py
tests/test_dbapi20.py
tests/test_dbapi20_copy.py
tests/test_tutorial.py

pygresql-5.1.2/PyGreSQL.egg-info/dependency_links.txt

(empty)

pygresql-5.1.2/PyGreSQL.egg-info/not-zip-safe

(empty)

pygresql-5.1.2/PyGreSQL.egg-info/top_level.txt

_pg
pg
pgdb

pygresql-5.1.2/README.rst

PyGreSQL - Python interface for PostgreSQL
==========================================

PyGreSQL is a Python module that interfaces to a PostgreSQL database.
It embeds the PostgreSQL query library to allow easy use of the powerful
PostgreSQL features from a Python script.

PyGreSQL should run on most platforms where PostgreSQL and Python are
running. It is based on the PyGres95 code written by Pascal Andre.
D'Arcy (darcy@druid.net) renamed it to PyGreSQL starting with version 2.0
and serves as the "BDFL" of PyGreSQL.

Starting with version 5.0, PyGreSQL also supports Python 3.

Installation
------------

The simplest way to install PyGreSQL is to type::

    $ pip install PyGreSQL

For other ways of installing PyGreSQL and requirements,
see the documentation.
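As a quick smoke test after installing, you can open a connection and run a
trivial query with the classic ``pg`` interface. This is only a sketch; the
local database name ``test`` is an assumption::

    from pg import DB

    db = DB(dbname='test')  # hypothetical local database
    # getresult() returns the rows of the result as a list of tuples
    print(db.query('select version()').getresult())
    db.close()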
Documentation
-------------

The documentation is available at
`www.pygresql.org <http://www.pygresql.org>`_.

A mirror of the documentation can be found at
`pygresql.readthedocs.io <http://pygresql.readthedocs.io>`_.

pygresql-5.1.2/docs/Makefile

# Makefile for Sphinx documentation
#

# You can set these variables from the command line.
SPHINXOPTS    =
SPHINXBUILD   = sphinx-build
PAPER         =
BUILDDIR      = _build

# User-friendly check for sphinx-build
ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH.
If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) endif # Internal variables. PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . # the i18n builder cannot share the environment and doctrees with the others I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest coverage gettext help: @echo "Please use \`make ' where is one of" @echo " html to make standalone HTML files" @echo " dirhtml to make HTML files named index.html in directories" @echo " singlehtml to make a single large HTML file" @echo " pickle to make pickle files" @echo " json to make JSON files" @echo " htmlhelp to make HTML files and a HTML help project" @echo " qthelp to make HTML files and a qthelp project" @echo " applehelp to make an Apple Help Book" @echo " devhelp to make HTML files and a Devhelp project" @echo " epub to make an epub" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @echo " latexpdf to make LaTeX files and run them through pdflatex" @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" @echo " text to make text files" @echo " man to make manual pages" @echo " texinfo to make Texinfo files" @echo " info to make Texinfo files and run them through makeinfo" @echo " gettext to make PO message catalogs" @echo " changes to make an overview of all changed/added/deprecated items" @echo " xml to make Docutils-native XML files" @echo " pseudoxml to make pseudoxml-XML files for display purposes" @echo " linkcheck to check all external links for integrity" @echo " doctest to run all doctests embedded in the documentation (if enabled)" @echo " coverage to run coverage check of the documentation (if enabled)" clean: rm -rf $(BUILDDIR)/* html: $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." dirhtml: $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." singlehtml: $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml @echo @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." pickle: $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle @echo @echo "Build finished; now you can process the pickle files." json: $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json @echo @echo "Build finished; now you can process the JSON files." htmlhelp: $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp @echo @echo "Build finished; now you can run HTML Help Workshop with the" \ ".hhp project file in $(BUILDDIR)/htmlhelp." qthelp: $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp @echo @echo "Build finished; now you can run "qcollectiongenerator" with the" \ ".qhcp project file in $(BUILDDIR)/qthelp, like this:" @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/PyGreSQL.qhcp" @echo "To view the help file:" @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/PyGreSQL.qhc" applehelp: $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp @echo @echo "Build finished. The help book is in $(BUILDDIR)/applehelp." @echo "N.B. You won't be able to view it unless you put it in" \ "~/Library/Documentation/Help or install it in your application" \ "bundle." 
devhelp: $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp @echo @echo "Build finished." @echo "To view the help file:" @echo "# mkdir -p $$HOME/.local/share/devhelp/PyGreSQL" @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/PyGreSQL" @echo "# devhelp" epub: $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub @echo @echo "Build finished. The epub file is in $(BUILDDIR)/epub." latex: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." @echo "Run \`make' in that directory to run these through (pdf)latex" \ "(use \`make latexpdf' here to do that automatically)." latexpdf: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through pdflatex..." $(MAKE) -C $(BUILDDIR)/latex all-pdf @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." latexpdfja: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through platex and dvipdfmx..." $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." text: $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text @echo @echo "Build finished. The text files are in $(BUILDDIR)/text." man: $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man @echo @echo "Build finished. The manual pages are in $(BUILDDIR)/man." texinfo: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." @echo "Run \`make' in that directory to run these through makeinfo" \ "(use \`make info' here to do that automatically)." info: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo "Running Texinfo files through makeinfo..." make -C $(BUILDDIR)/texinfo info @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." gettext: $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale @echo @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." changes: $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes @echo @echo "The overview file is in $(BUILDDIR)/changes." linkcheck: $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in $(BUILDDIR)/linkcheck/output.txt." doctest: $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest @echo "Testing of doctests in the sources finished, look at the " \ "results in $(BUILDDIR)/doctest/output.txt." coverage: $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage @echo "Testing of coverage in the sources finished, look at the " \ "results in $(BUILDDIR)/coverage/python.txt." xml: $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml @echo @echo "Build finished. The XML files are in $(BUILDDIR)/xml." pseudoxml: $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml @echo @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." 
pygresql-5.1.2/docs/_static/favicon.ico

(binary icon data omitted)

pygresql-5.1.2/docs/_static/pygresql.css_t

{% macro experimental(keyword, value) %}
    {% if value %}
        -moz-{{keyword}}: {{value}};
        -webkit-{{keyword}}: {{value}};
        -o-{{keyword}}: {{value}};
        -ms-{{keyword}}: {{value}};
        {{keyword}}: {{value}};
    {% endif %}
{% endmacro %}

{% macro border_radius(value) -%}
    {{experimental("border-radius", value)}}
{% endmacro %}

{% macro box_shadow(value) -%}
    {{experimental("box-shadow", value)}}
{% endmacro %}

.pageheader.related {
    text-align: left;
    padding: 10px 15px;
    border: 1px solid #eeeeee;
    margin-bottom: 10px;
    {{border_radius("1em 1em 1em 1em")}}
    {% if theme_borderless_decor | tobool %}
        border-top: 0;
        border-bottom: 0;
    {% endif %}
}

.pageheader.related .logo {
    font-size: 36px;
    font-style: italic;
    letter-spacing: 5px;
    margin-right: 2em;
}

.pageheader.related .logo a,
.pageheader.related .logo a:hover {
    background: transparent;
    color: {{ theme_relbarlinkcolor }};
    border: none;
    text-decoration: none;
    text-shadow: none;
    {{box_shadow("none")}}
}

.pageheader.related ul {
    float: right;
    margin: 2px 1em;
}

.pageheader.related li {
    float: left;
    margin: 0 0 0 10px;
}

.pageheader.related li a {
    padding: 8px 12px;
}

.norelbar .subtitle {
    font-size: 14px;
    line-height: 18px;
    font-weight: bold;
    letter-spacing: 4px;
    text-align: right;
    padding: 0 1em;
    margin-top: -9px;
}

.relbar-top .related.norelbar {
    height: 22px;
    border-bottom: 14px solid #eeeeee;
}

.relbar-bottom .related.norelbar {
    height: 22px;
    border-top: 14px solid #eeeeee;
}

pygresql-5.1.2/docs/_static/pygresql.png

(binary image data omitted: PyGreSQL logo)
pygresql-5.1.2/docs/_templates/layout.html

{%- extends "cloud/layout.html" %}
{% set css_files = css_files + ["_static/pygresql.css"] %}

{#
This layout adds a page header above the standard layout.
It also removes the relbars from all pages that are not part of
the core documentation in the contents/ directory, adapting the
navigation bar (breadcrumb) appropriately.
#} {% set is_content = pagename.startswith(('contents/', 'genindex', 'modindex', 'py-', 'search')) %} {% if is_content %} {% set master_doc = 'contents/index' %} {% set parents = parents[1:] %} {% endif %} {% block header %} {% endblock %} {% block relbar1 -%} {%- if is_content -%} {{ super() }} {% else %}
{%- endif -%} {%- endblock %} {% block relbar2 -%} {%- if is_content -%} {{ super() }} {%- else -%}
{%- endif -%} {%- endblock %} {% block content -%} {%- if is_content -%} {{ super() }} {%- else -%}
{{ super() }}
{%- endif -%}
{%- endblock %}

pygresql-5.1.2/docs/about.rst

About PyGreSQL
==============

.. include:: about.txt

pygresql-5.1.2/docs/about.txt

**PyGreSQL** is an *open-source* `Python <http://www.python.org>`_ module
that interfaces to a `PostgreSQL <http://www.postgresql.org>`_ database.
It embeds the PostgreSQL query library to allow easy use of the powerful
PostgreSQL features from a Python script.

| This software is copyright © 1995, Pascal Andre.
| Further modifications are copyright © 1997-2008 by D'Arcy J.M. Cain.
| Further modifications are copyright © 2009-2020 by the PyGreSQL team.
| For licensing details, see the full :doc:`copyright`.

**PostgreSQL** is a highly scalable, SQL compliant, open source
object-relational database management system. With more than 20 years of
development history, it is quickly becoming the de facto database for
enterprise level open source solutions. Best of all, PostgreSQL's source
code is available under the most liberal open source license: the BSD
license.

**Python** Python is an interpreted, interactive, object-oriented
programming language. It is often compared to Tcl, Perl, Scheme or Java.
Python combines remarkable power with very clear syntax. It has modules,
classes, exceptions, very high level dynamic data types, and dynamic typing.
There are interfaces to many system calls and libraries, as well as to
various windowing systems (X11, Motif, Tk, Mac, MFC). New built-in modules
are easily written in C or C++. Python is also usable as an extension
language for applications that need a programmable interface. The Python
implementation is copyrighted but freely usable and distributable, even for
commercial use.

**PyGreSQL** is a Python module that interfaces to a PostgreSQL database.
It embeds the PostgreSQL query library to allow easy use of the powerful
PostgreSQL features from a Python script or application.

PyGreSQL is developed and tested on a NetBSD system, but it also runs on
most other platforms where PostgreSQL and Python are running. It is based
on the PyGres95 code written by Pascal Andre (andre@chimay.via.ecp.fr).
D'Arcy (darcy@druid.net) renamed it to PyGreSQL starting with version 2.0
and serves as the "BDFL" of PyGreSQL.

The current version PyGreSQL 5.1.2 needs PostgreSQL 9.0 to 9.6 or 10 to 12,
and Python 2.6, 2.7 or 3.3 to 3.8. If you need to support older PostgreSQL
versions or older Python 2.x versions, you can resort to the PyGreSQL 4.x
versions that still support them.
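As a small, concrete illustration of the module described above, here is a
minimal sketch using the DB-API 2 compliant ``pgdb`` interface; the database
name ``test`` is an assumption::

    import pgdb

    con = pgdb.connect(database='test')  # hypothetical local database
    cur = con.cursor()
    cur.execute("select 1 as x, 'hello' as y")
    row = cur.fetchone()  # rows are returned as named tuples
    print(row.x, row.y)
    con.close()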
pygresql-5.1.2/docs/announce.rst

======================
PyGreSQL Announcements
======================

---------------------------------
Release of PyGreSQL version 5.1.2
---------------------------------

Release 5.1.2 of PyGreSQL.

It is available at: https://pypi.org/project/PyGreSQL/.

If you are running NetBSD, look in the packages directory under databases.
There is also a package in the FreeBSD ports collection.

Please refer to `changelog.txt `_
for things that have changed in this version.

This version has been built and unit tested on:

- NetBSD
- FreeBSD
- openSUSE
- Ubuntu
- Windows 7 and 10 with both MinGW and Visual Studio
- PostgreSQL 9.0 to 9.6 and 10 to 12 (32 and 64bit)
- Python 2.6, 2.7 and 3.3 to 3.8 (32 and 64bit)

| D'Arcy J.M. Cain
| darcy@PyGreSQL.org

pygresql-5.1.2/docs/community/homes.rst

Project home sites
------------------

**Python**: http://www.python.org

**PostgreSQL**: http://www.postgresql.org

**PyGreSQL**: http://www.pygresql.org

pygresql-5.1.2/docs/community/index.rst

PyGreSQL Development and Support
================================

PyGreSQL is an open-source project created by a group of volunteers.
The project and the development infrastructure are currently maintained
by D'Arcy J.M. Cain. We would be glad to welcome more contributors so
that PyGreSQL can be further developed, modernized and improved.

.. include:: mailinglist.rst
.. include:: source.rst
.. include:: issues.rst
.. include:: support.rst
.. include:: homes.rst

pygresql-5.1.2/docs/community/issues.rst

Issue Tracker
-------------

Bug reports and enhancement requests can be posted as
`GitHub issues <https://github.com/PyGreSQL/PyGreSQL/issues>`_.

pygresql-5.1.2/docs/community/mailinglist.rst

Mailing list
------------

You can join `the mailing list `_ to discuss future development of the
PyGreSQL interface or if you have questions or problems with PyGreSQL that
are not covered in the :doc:`documentation <../contents/index>`.

This is usually a low volume list except when there are new features being
added.

pygresql-5.1.2/docs/community/source.rst

Access to the source repository
-------------------------------

The source code of PyGreSQL is available as a `Git `_ repository
on `GitHub <https://github.com/PyGreSQL/PyGreSQL>`_.

The current master branch of the repository can be cloned with the command::

    git clone https://github.com/PyGreSQL/PyGreSQL.git

You can also download the master branch as a `zip archive `_.

Contributions can be proposed as `pull requests `_ on GitHub.
Before starting to work on larger contributions, please discuss with the
core developers using the `mailing list `_ or in a
`GitHub issue <https://github.com/PyGreSQL/PyGreSQL/issues>`_.

pygresql-5.1.2/docs/community/support.rst

Support
-------

**Python**: see http://www.python.org/community/

**PostgreSQL**: see http://www.postgresql.org/support/

**PyGreSQL**: Join `the PyGreSQL mailing list `_ if you need help
regarding PyGreSQL.

You can also ask questions regarding PyGreSQL on `Stack Overflow `_.

Please use `GitHub issues <https://github.com/PyGreSQL/PyGreSQL/issues>`_
only for bug reports and enhancement requests, not for questions about
usage of PyGreSQL.

Please note that messages to individual developers will generally not be
answered directly. All questions, comments and code changes must be
submitted to the mailing list for peer review and archiving purposes.

pygresql-5.1.2/docs/conf.py

# -*- coding: utf-8 -*-
#
# PyGreSQL documentation build configuration file.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys import os import shlex import shutil # Import Cloud theme (this will also automatically add the theme directory). # Note: We add a navigation bar to the cloud them using a custom layout. if os.environ.get('READTHEDOCS', None) == 'True': # We cannot use our custom layout here, since RTD overrides layout.html. use_cloud_theme = False else: try: import cloud_sptheme use_cloud_theme = True except ImportError: use_cloud_theme = False shutil.copyfile('start.txt' if use_cloud_theme else 'toc.txt', 'index.rst') # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.autodoc'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] if use_cloud_theme else [] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # source_suffix = ['.rst', '.md'] source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = 'PyGreSQL' author = 'The PyGreSQL team' copyright = '2020, ' + author # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '5.1' # The full version, including alpha/beta/rc tags. release = '5.1.2' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # List of pages which are included in other pages and therefore should # not appear in the toctree. exclude_patterns += [ 'download/download.rst', 'download/files.rst', 'community/mailinglist.rst', 'community/source.rst', 'community/issues.rst', 'community/support.rst', 'community/homes.rst'] if use_cloud_theme: exclude_patterns += ['about.rst'] # The reST default role (used for this markup: `text`) for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. 
#modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. #keep_warnings = False # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'cloud' if use_cloud_theme else 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. if use_cloud_theme: html_theme_options = { 'roottarget': 'contents/index', 'defaultcollapsed': True, 'shaded_decor': True} else: html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. html_theme_path = ['_themes'] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". html_title = 'PyGreSQL %s' % version if use_cloud_theme: html_title += ' documentation' # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. html_logo = '_static/pygresql.png' # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. html_favicon = '_static/favicon.ico' # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. #html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Language to be used for generating the HTML full-text search index. 
# Sphinx supports the following languages:
#   'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
#   'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'

# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}

# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'

# Output file base name for HTML help builder.
htmlhelp_basename = 'PyGreSQLdoc'

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #'preamble': '',

    # Latex figure (float) alignment
    #'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'PyGreSQL.tex', 'PyGreSQL Documentation',
     author, 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'pygresql', 'PyGreSQL Documentation',
     [author], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'PyGreSQL', u'PyGreSQL Documentation',
     author, 'PyGreSQL', 'One line description of project.',
     'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'

# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False

pygresql-5.1.2/docs/contents/changelog.rst

ChangeLog
=========

Version 5.1.2 (2020-04-19)
--------------------------
- Improved handling of build_ext options for disabling certain features.
- Avoid compiler warnings with proper casts. This should solve problems
  when building PyGreSQL on macOS.
- Export only the public API on wildcard imports

Version 5.1.1 (2020-03-05)
--------------------------
- This version officially supports the new Python 3.8 and PostgreSQL 12.
- This version changes internal queries so that they cannot be exploited
  using a PostgreSQL security vulnerability described as CVE-2018-1058.
- Removed NO_PQSOCKET switch which is not needed any longer.
- Fixed documentation for other compilation options which had been renamed.
- Started using GitHub as development platform.

Version 5.1 (2019-05-17)
------------------------
- Changes to the classic PyGreSQL module (pg):
- Support for prepared statements (following a suggestion and first
  implementation by Justin Pryzby on the mailing list). A short usage
  sketch follows at the end of this section.
- DB wrapper objects based on existing connections can now be closed and
  reopened properly (but the underlying connection will not be affected).
- The query object can now be used as an iterator similar to
  query.getresult() and will then yield the rows as tuples.
  Thanks to Justin Pryzby for the proposal and most of the implementation.
- Deprecated query.ntuples() in the classic API, since len(query) can now
  be used and returns the same number.
- The i-th row of the result can now be accessed as `query[i]`.
- New method query.scalarresult() that gets only the first field of each
  row as a list of scalar values.
- New methods query.one(), query.onenamed(), query.onedict() and
  query.onescalar() that fetch only one row from the result or None
  if there are no more rows, similar to the cursor.fetchone() method
  in DB-API 2.
- New methods query.single(), query.singlenamed(), query.singledict() and
  query.singlescalar() that fetch only one row from the result, and raise
  an error if the result does not have exactly one row.
- New methods query.dictiter(), query.namediter() and query.scalariter()
  returning the same values as query.dictresult(), query.namedresult()
  and query.scalarresult(), but as iterables instead of lists. This avoids
  creating a Python list of all results and can be slightly more efficient.
- Removed pg.get/set_namedresult. You can configure the named tuples
  factory with the pg.set_row_factory_size() function and change the
  implementation with pg.set_query_helpers(), but this is not recommended
  and this function is not part of the official API.
- Added new connection attributes `socket`, `backend_pid`, `ssl_in_use`
  and `ssl_attributes` (the latter need PostgreSQL >= 9.5 on the client).
- Changes to the DB-API 2 module (pgdb):
- Connections now have an `autocommit` attribute which is set to `False`
  by default but can be set to `True` to switch to autocommit mode where
  no transactions are started and calling commit() is not required. Note
  that this is not part of the DB-API 2 standard.
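As a brief illustration of the new query features listed above, here is a
minimal sketch; the connection parameters and the ``fruits`` table with its
contents are assumptions, not part of this release::

    from pg import DB

    db = DB(dbname='test')  # hypothetical local database
    # prepared statements (new in 5.1)
    db.prepare('fruit_by_id', 'select id, name from fruits where id = $1')
    row = db.query_prepared('fruit_by_id', 1).onedict()  # one row or None
    # query objects are now iterable and yield the rows as tuples
    for id_, name in db.query('select id, name from fruits'):
        print(id_, name)
    db.close()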
Version 5.0.7 (2019-05-17)
--------------------------
- This version officially supports the new PostgreSQL 11.
- Fixed a bug in parsing array subscript ranges (reported by Justin Pryzby).
- Fixed an issue when deleting a DB wrapper object with the underlying
  connection already closed (bug report by Jacob Champion).

Version 5.0.6 (2018-07-29)
--------------------------
- This version officially supports the new Python 3.7.
- Correct trove classifier for the PostgreSQL License.

Version 5.0.5 (2018-04-25)
--------------------------
- This version officially supports the new PostgreSQL 10.
- The memory for the string with the number of rows affected by a classic pg
  module query() was already freed (bug report and fix by Peifeng Qiu).

Version 5.0.4 (2017-07-23)
--------------------------
- This version officially supports the new Python 3.6 and PostgreSQL 9.6.
- query_formatted() can now be used without parameters.
- The automatic renaming of columns that are invalid as field names of named tuples now works more accurately in Python 2.6 and 3.0. - Fixed error checks for unlink() and export() methods of large objects (bug report by Justin Pryzby). - Fixed a compilation issue under OS X (bug report by Josh Johnston). Version 5.0.3 (2016-12-10) -------------------------- - It is now possible to use a custom array cast function by changing the type caster for the 'anyarray' type. For instance, by calling set_typecast('anyarray', lambda v, c: v) you can have arrays returned as strings instead of lists. Note that in the pg module, you can also call set_array(False) in order to return arrays as strings. - The namedtuple classes used for the rows of query results are now cached and reused internally, since creating namedtuples classes in Python is a somewhat expensive operation. By default the cache has a size of 1024 entries, but this can be changed with the set_row_factory_size() function. In certain cases this change can notably improve the performance. - The namedresult() method in the classic API now also tries to rename columns that would result in invalid field names. Version 5.0.2 (2016-09-13) -------------------------- - Fixed an infinite recursion problem in the DB wrapper class of the classic module that could occur when the underlying connection could not be properly opened (bug report by Justin Pryzby). Version 5.0.1 (2016-08-18) -------------------------- - The update() and delete() methods of the DB wrapper now use the OID instead of the primary key if both are provided. This restores backward compatibility with PyGreSQL 4.x and allows updating the primary key itself if an OID exists. - The connect() function of the DB API 2.0 module now accepts additional keyword parameters such as "application_name" which will be passed on to PostgreSQL. - PyGreSQL now adapts some queries to be able to access older PostgreSQL 8.x databases (as suggested on the mailing list by Andres Mejia). However, these old versions of PostgreSQL are not officially supported and tested any more. - Fixed an issue with Postgres types that have an OID >= 0x80000000 (reported on the mailing list by Justin Pryzby). - Allow extra values that are not used in the command in the parameter dict passed to the query_formatted() method (as suggested by Justin Pryzby). - Improved handling of empty arrays in the classic module. - Unused classic connections were not properly garbage collected which could cause memory leaks (reported by Justin Pryzby). - Made C extension compatible with MSVC 9 again (this was needed to compile for Python 2 on Windows). Version 5.0 (2016-03-20) ------------------------ - This version now runs on both Python 2 and Python 3. - The supported versions are Python 2.6 to 2.7, and 3.3 to 3.5. - PostgreSQL is supported in all versions from 9.0 to 9.5. - Changes in the classic PyGreSQL module (pg): - The classic interface got two new methods get_as_list() and get_as_dict() returning a database table as a Python list or dict. The amount of data returned can be controlled with various parameters. - A method upsert() has been added to the DB wrapper class that utilizes the "upsert" feature that is new in PostgreSQL 9.5. The new method nicely complements the existing get/insert/update/delete() methods. - When using insert/update/upsert(), you can now pass PostgreSQL arrays as lists and PostgreSQL records as tuples in the classic module. 
- Conversely, when the query method returns a PostgreSQL array, it is passed to Python as a list. PostgreSQL records are converted to named tuples as well, but only if you use one of the get/insert/update/delete() methods. PyGreSQL uses a new fast built-in parser to achieve this. The automatic conversion of arrays to lists can be disabled with set_array(False). - The pkey() method of the classic interface now returns tuples instead of frozensets, with the same order of columns as the primary key index. - Like the DB-API 2 module, the classic module now also returns bool values from the database as Python bool objects instead of strings. You can still restore the old behavior by calling set_bool(False). - Like the DB-API 2 module, the classic module now also returns bytea data fetched from the database as byte strings, so you don't need to call unescape_bytea() any more. This has been made configurable though, and you can restore the old behavior by calling set_bytea_escaped(True). - A method set_jsondecode() has been added for changing or removing the function that automatically decodes JSON data coming from the database. By default, decoding JSON is now enabled and uses the decoder function in the standard library with its default parameters. - The table name that is affixed to the name of the OID column returned by the get() method of the classic interface will not automatically be fully qualified any more. This reduces overhead from the interface, but it means you must always write the table name in the same way when you are using tables with OIDs and call methods that make use of these. Also, OIDs are now only used when access via primary key is not possible. Note that OIDs are considered deprecated anyway, and they are not created by default any more in PostgreSQL 8.1 and later. - The internal caching and automatic quoting of class names in the classic interface has been simplified and improved, it should now perform better and use less memory. Also, overhead for quoting values in the DB wrapper methods has been reduced and security has been improved by passing the values to libpq separately as parameters instead of inline. - It is now possible to use the registered type names instead of the more coarse-grained type names that are used by default in PyGreSQL, without breaking any of the mechanisms for quoting and typecasting, which rely on the type information. This is achieved while maintaining simplicity and backward compatibility by augmenting the type name string objects with all the necessary information under the cover. To switch registered type names on or off (this is the default), call the DB wrapper method use_regtypes(). - A new method query_formatted() has been added to the DB wrapper class that allows using the format specifications from Python. A flag "inline" can be set to specify whether parameters should be sent to the database separately or formatted into the SQL. - A new type helper Bytea() has been added. - Changes in the DB-API 2 module (pgdb): - The DB-API 2 module now always returns result rows as named tuples instead of simply lists as before. The documentation explains how you can restore the old behavior or use custom row objects instead. - Various classes used by the classic and DB-API 2 modules have been renamed to become simpler, more intuitive and in line with the names used in the DB-API 2 documentation. Since the API provides objects of these types only through constructor functions, this should not cause any incompatibilities. 
- The DB-API 2 module now supports the callproc() cursor method. Note
  that output parameters are currently not replaced in the return value.
- The DB-API 2 module now supports copy operations between data streams
  on the client and database tables via the COPY command of PostgreSQL.
  The cursor method copy_from() can be used to copy data from the client
  to the database, and the cursor method copy_to() can be used to copy
  data from the database to the client.
- The 7-tuples returned by the description attribute of a pgdb cursor
  are now named tuples, i.e. their elements can be also accessed by name.
  The column names and types can now also be requested through the
  colnames and coltypes attributes, which are not part of DB-API 2 though.
  The type_code provided by the description attribute is still equal to
  the PostgreSQL internal type name, but now carries some more information
  in additional attributes. The size, precision and scale information that
  is part of the description is now properly set for numeric types.
- If you pass a Python list as one of the parameters to a DB-API 2 cursor,
  it is now automatically bound using an ARRAY constructor. If you pass a
  Python tuple, it is bound using a ROW constructor. This is useful for
  passing records as well as making use of the IN syntax.
- Inversely, when a fetch method of a DB-API 2 cursor returns a PostgreSQL
  array, it is passed to Python as a list, and when it returns a PostgreSQL
  composite type, it is passed to Python as a named tuple. PyGreSQL uses a
  new fast built-in parser to achieve this. Anonymous composite types are
  also supported, but yield only an ordinary tuple containing text strings.
- New type helpers Interval() and Uuid() have been added.
- The connection has a new attribute "closed" that can be used to check
  whether the connection is closed or broken.
- SQL commands are always handled as if they include parameters, i.e.
  literal percent signs must always be doubled. This consistent behavior
  is necessary for using pgdb with wrappers like SQLAlchemy.
- PyGreSQL 5.0 will be supported as a database driver by SQLAlchemy 1.1.
- Changes concerning both modules:
- PyGreSQL now tries to raise more specific and appropriate subclasses of
  DatabaseError than just ProgrammingError. Particularly, when database
  constraints are violated, it raises an IntegrityError now.
- The modules now provide get_typecast() and set_typecast() methods
  allowing to control the typecasting on the global level. The connection
  objects have type caches with the same methods which give control over
  the typecasting on the level of the current connection. See the
  documentation for details about the type cache and the typecast
  mechanisms provided by PyGreSQL.
- Dates, times, timestamps and time intervals are now returned as the
  corresponding Python objects from the datetime module of the standard
  library. In earlier versions of PyGreSQL they had been returned as
  strings. You can restore the old behavior by deactivating the respective
  typecast functions, e.g. set_typecast('date', str).
- PyGreSQL now supports the "uuid" data type, converting such columns
  automatically to and from Python uuid.UUID objects.
- PyGreSQL now supports the "hstore" data type, converting such columns
  automatically to and from Python dictionaries. If you want to insert
  Python objects as hstore data using DB-API 2, you should wrap them in
  the new HStore() type constructor as a hint to PyGreSQL.
- PyGreSQL now supports the "json" and "jsonb" data types, converting such
  columns automatically to and from Python objects. If you want to insert
  Python objects as JSON data using DB-API 2, you should wrap them in the
  new Json() type constructor as a hint to PyGreSQL (see the sketch below).
- A new type helper Literal() for inserting parameters literally as SQL
  has been added. This is useful for table names, for instance.
- Fast parsers cast_array(), cast_record() and cast_hstore for the input
  and output syntax for PostgreSQL arrays, composite types and the hstore
  type have been added to the C extension module. The array parser also
  allows using multi-dimensional arrays with PyGreSQL.
- The tty parameter and attribute of database connections has been
  removed since it is not supported by PostgreSQL versions newer than 7.4.
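As a sketch of the Json() adaptation hint mentioned above (the database
name ``test`` and a working connection are assumptions)::

    import pgdb
    from pgdb import Json

    con = pgdb.connect(database='test')  # hypothetical local database
    cur = con.cursor()
    # wrap the Python object in Json() as a hint that it should be
    # passed to the server as a JSON value
    cur.execute('select %s::json as data', (Json({'answer': 42}),))
    print(cur.fetchone().data)  # decoded back into a Python dict
    con.close()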
- PyGreSQL now supports the "json" and "jsonb" data types, converting such columns automatically to and from Python objects. If you want to insert Python objects as JSON data using DB-API 2, you should wrap them in the new Json() type constructor as a hint to PyGreSQL. - A new type helper Literal() for inserting parameters literally as SQL has been added. This is useful for table names, for instance. - Fast parsers cast_array(), cast_record() and cast_hstore for the input and output syntax for PostgreSQL arrays, composite types and the hstore type have been added to the C extension module. The array parser also allows using multi-dimensional arrays with PyGreSQL. - The tty parameter and attribute of database connections has been removed since it is not supported by PostgreSQL versions newer than 7.4. Version 4.2.2 (2016-03-18) -------------------------- - The get_relations() and get_tables() methods now also return system views and tables if you set the optional "system" parameter to True. - Fixed a regression when using temporary tables with DB wrapper methods (thanks to Patrick TJ McPhee for reporting). Version 4.2.1 (2016-02-18) -------------------------- - Fixed a small bug when setting the notice receiver. - Some more minor fixes and re-packaging with proper permissions. Version 4.2 (2016-01-21) ------------------------ - The supported Python versions are 2.4 to 2.7. - PostgreSQL is supported in all versions from 8.3 to 9.5. - Set a better default for the user option "escaping-funcs". - Force build to compile with no errors. - New methods get_parameters() and set_parameters() in the classic interface which can be used to get or set run-time parameters. - New method truncate() in the classic interface that can be used to quickly empty a table or a set of tables. - Fix decimal point handling. - Add option to return boolean values as bool objects. - Add option to return money values as string. - get_tables() does not list information schema tables any more. - Fix notification handler (Thanks Patrick TJ McPhee). - Fix a small issue with large objects. - Minor improvements of the NotificationHandler. - Converted documentation to Sphinx and added many missing parts. - The tutorial files have become a chapter in the documentation. - Greatly improved unit testing, tests run with Python 2.4 to 2.7 again. Version 4.1.1 (2013-01-08) -------------------------- - Add NotificationHandler class and method. Replaces need for pgnotify. - Sharpen test for inserting current_timestamp. - Add more quote tests. False and 0 should evaluate to NULL. - More tests - Any number other than 0 is True. - Do not use positional parameters internally. This restores backward compatibility with version 4.0. - Add methods for changing the decimal point. Version 4.1 (2013-01-01) ------------------------ - Dropped support for Python below 2.5 and PostgreSQL below 8.3. - Added support for Python up to 2.7 and PostgreSQL up to 9.2. - Particularly, support PQescapeLiteral() and PQescapeIdentifier(). - The query method of the classic API now supports positional parameters. This an effective way to pass arbitrary or unknown data without worrying about SQL injection or syntax errors (contribution by Patrick TJ McPhee). - The classic API now supports a method namedresult() in addition to getresult() and dictresult(), which returns the rows of the result as named tuples if these are supported (Python 2.6 or higher). 
- The classic API has got the new methods begin(), commit(), rollback(), savepoint() and release() for handling transactions. - Both classic and DBAPI 2 connections can now be used as context managers for encapsulating transactions. - The execute() and executemany() methods now return the cursor object, so you can now write statements like "for row in cursor.execute(...)" (as suggested by Adam Frederick). - Binary objects are now automatically escaped and unescaped. - Bug in money quoting fixed. Amounts of $0.00 handled correctly. - Proper handling of date and time objects as input. - Proper handling of floats with 'nan' or 'inf' values as input. - Fixed the set_decimal() function. - All DatabaseError instances now have a sqlstate attribute. - The getnotify() method can now also return payload strings (#15). - Better support for notice processing with the new methods set_notice_receiver() and get_notice_receiver() (as suggested by Michael Filonenko, see #37). - Open transactions are rolled back when pgdb connections are closed (as suggested by Peter Harris, see #46). - Connections and cursors can now be used with the "with" statement (as suggested by Peter Harris, see #46). - New method use_regtypes() that can be called to let getattnames() return registered type names instead of the simplified classic types (#44). Version 4.0 (2009-01-01) ------------------------ - Dropped support for Python below 2.3 and PostgreSQL below 7.4. - Improved performance of fetchall() for large result sets by speeding up the type casts (as suggested by Peter Schuller). - Exposed exceptions as attributes of the connection object. - Exposed connection as attribute of the cursor object. - Cursors now support the iteration protocol. - Added new method to get parameter settings. - Added customizable row_factory as suggested by Simon Pamies. - Separated between mandatory and additional type objects. - Added keyword args to insert, update and delete methods. - Added exception handling for direct copy. - Start transactions only when necessary, not after every commit(). - Release the GIL while making a connection (as suggested by Peter Schuller). - If available, use decimal.Decimal for numeric types. - Allow DB wrapper to be used with DB-API 2 connections (as suggested by Chris Hilton). - Made private attributes of DB wrapper accessible. - Dropped dependence on mx.DateTime module. - Support for PQescapeStringConn() and PQescapeByteaConn(); these are now also used by the internal _quote() functions. - Added 'int8' to INTEGER types. New SMALLINT type. - Added a way to find the number of rows affected by a query() with the classic pg module by returning it as a string. For single inserts, query() still returns the oid as an integer. The pgdb module already provides the "rowcount" cursor attribute for the same purpose. - Improved getnotify() by calling PQconsumeInput() instead of submitting an empty command. - Removed compatibility code for old OID munging style. - The insert() and update() methods now use the "returning" clause if possible to get all changed values, and they also check in advance whether a subsequent select is possible, so that ongoing transactions won't break if there is no select privilege. - Added "protocol_version" and "server_version" attributes. - Revived the "user" attribute. - The pg module now works correctly with composite primary keys; these are represented as frozensets. - Removed the undocumented and actually unnecessary "view" parameter from the get() method. 
- get() raises a nicer ProgrammingError instead of a KeyError if no primary key was found. - delete() now also works based on the primary key if no oid available and returns whether the row existed or not. Version 3.8.1 (2006-06-05) -------------------------- - Use string methods instead of deprecated string functions. - Only use SQL-standard way of escaping quotes. - Added the functions escape_string() and escape/unescape_bytea() (as suggested by Charlie Dyson and Kavous Bojnourdi a long time ago). - Reverted code in clear() method that set date to current. - Added code for backwards compatibility in OID munging code. - Reorder attnames tests so that "interval" is checked for before "int." - If caller supplies key dictionary, make sure that all has a namespace. Version 3.8 (2006-02-17) ------------------------ - Installed new favicon.ico from Matthew Sporleder - Replaced snprintf by PyOS_snprintf - Removed NO_SNPRINTF switch which is not needed any longer - Clean up some variable names and namespace - Add get_relations() method to get any type of relation - Rewrite get_tables() to use get_relations() - Use new method in get_attnames method to get attributes of views as well - Add Binary type - Number of rows is now -1 after executing no-result statements - Fix some number handling - Non-simple types do not raise an error any more - Improvements to documentation framework - Take into account that nowadays not every table must have an oid column - Simplification and improvement of the inserttable() function - Fix up unit tests - The usual assortment of minor fixes and enhancements Version 3.7 (2005-09-07) ------------------------ Improvement of pgdb module: - Use Python standard `datetime` if `mxDateTime` is not available Major improvements and clean-up in classic pg module: - All members of the underlying connection directly available in `DB` - Fixes to quoting function - Add checks for valid database connection to methods - Improved namespace support, handle `search_path` correctly - Removed old dust and unnecessary imports, added docstrings - Internal sql statements as one-liners, smoothed out ugly code Version 3.6.2 (2005-02-23) -------------------------- - Further fixes to namespace handling Version 3.6.1 (2005-01-11) -------------------------- - Fixes to namespace handling Version 3.6 (2004-12-17) ------------------------ - Better DB-API 2.0 compliance - Exception hierarchy moved into C module and made available to both APIs - Fix error in update method that caused false exceptions - Moved to standard exception hierarchy in classic API - Added new method to get transaction state - Use proper Python constants where appropriate - Use Python versions of strtol, etc. Allows Win32 build. - Bug fixes and cleanups Version 3.5 (2004-08-29) ------------------------ Fixes and enhancements: - Add interval to list of data types - fix up method wrapping especially close() - retry pkeys once if table missing in case it was just added - wrap query method separately to handle debug better - use isinstance instead of type - fix free/PQfreemem issue - finally - miscellaneous cleanups and formatting Version 3.4 (2004-06-02) ------------------------ Some cleanups and fixes. This is the first version where PyGreSQL is moved back out of the PostgreSQL tree. A lot of the changes mentioned below were actually made while in the PostgreSQL tree since their last release. 
- Allow for larger integer returns - Return proper strings for true and false - Cleanup convenience method creation - Enhance debugging method - Add reopen method - Allow programs to preload field names for speedup - Move OID handling so that it returns long instead of int - Miscellaneous cleanups and formatting Version 3.3 (2001-12-03) ------------------------ A few cleanups. Mostly there was some confusion about the latest version and so I am bumping the number to keep it straight. - Added NUMERICOID to list of returned types. This fixes a bug when returning aggregates in the latest version of PostgreSQL. Version 3.2 (2001-06-20) ------------------------ Note that there are very few changes to PyGreSQL between 3.1 and 3.2. The main reason for the release is the move into the PostgreSQL development tree. Even the WIN32 changes are pretty minor. - Add Win32 support (gerhard@bigfoot.de) - Fix some DB-API quoting problems (niall.smart@ebeon.com) - Moved development into PostgreSQL development tree. Version 3.1 (2000-11-06) ------------------------ - Fix some quoting functions. In particular handle NULLs better. - Use a method to add primary key information rather than direct manipulation of the class structures - Break decimal out in `_quote` (in pg.py) and treat it as float - Treat timestamp like date for quoting purposes - Remove a redundant SELECT from the `get` method speeding it, and `insert` (since it calls `get`) up a little. - Add test for BOOL type in typecast method to `pgdbTypeCache` class (tv@beamnet.de) - Fix pgdb.py to send port as integer to lower level function (dildog@l0pht.com) - Change pg.py to speed up some operations - Allow updates on tables with no primary keys Version 3.0 (2000-05-30) ------------------------ - Remove strlen() call from pglarge_write() and get size from object (Richard@Bouska.cz) - Add a little more error checking to the quote function in the wrapper - Add extra checking in `_quote` function - Wrap query in pg.py for debugging - Add DB-API 2.0 support to pgmodule.c (andre@via.ecp.fr) - Add DB-API 2.0 wrapper pgdb.py (andre@via.ecp.fr) - Correct keyword clash (temp) in tutorial - Clean up layout of tutorial - Return NULL values as None (rlawrence@lastfoot.com) (WARNING: This will cause backwards compatibility issues) - Change None to NULL in insert and update - Change hash-bang lines to use /usr/bin/env - Clearing date should be blank (NULL) not TODAY - Quote backslashes in strings in `_quote` (brian@CSUA.Berkeley.EDU) - Expanded and clarified build instructions (tbryan@starship.python.net) - Make code thread safe (Jerome.Alet@unice.fr) - Add README.distutils (mwa@gate.net & jeremy@cnri.reston.va.us) - Many fixes and increased DB-API compliance by chifungfan@yahoo.com, tony@printra.net, jeremy@alum.mit.edu and others to get the final version ready to release. Version 2.4 (1999-06-15) ------------------------ - Insert returns None if the user doesn't have select permissions on the table. It can (and does) happen that one has insert but not select permissions on a table. 
- Added ntuples() method to query object (brit@druid.net) - Corrected a bug related to getresult() and the money type - Corrected a bug related to negative money amounts - Allow update based on primary key if munged oid not available and table has a primary key - Add many __doc__ strings (andre@via.ecp.fr) - Get method works with views if key specified Version 2.3 (1999-04-17) ------------------------ - connect.host returns "localhost" when connected to Unix socket (torppa@tuhnu.cutery.fi) - Use `PyArg_ParseTupleAndKeywords` in connect() (torppa@tuhnu.cutery.fi) - fixes and cleanups (torppa@tuhnu.cutery.fi) - Fixed memory leak in dictresult() (terekhov@emc.com) - Deprecated pgext.py - functionality now in pg.py - More cleanups to the tutorial - Added fileno() method - terekhov@emc.com (Mikhail Terekhov) - added money type to quoting function - Compiles cleanly with more warnings turned on - Returns PostgreSQL error message on error - Init accepts keywords (Jarkko Torppa) - Convenience functions can be overridden (Jarkko Torppa) - added close() method Version 2.2 (1998-12-21) ------------------------ - Added user and password support thanks to Ng Pheng Siong (ngps@post1.com) - Insert queries return the inserted oid - Add new `pg` wrapper (C module renamed to _pg) - Wrapped database connection in a class - Cleaned up some of the tutorial. (More work needed.) - Added `version` and `__version__`. Thanks to thilo@eevolute.com for the suggestion. Version 2.1 (1998-03-07) ------------------------ - return fields as proper Python objects for field type - Cleaned up pgext.py - Added dictresult method Version 2.0 (1997-12-23) ------------------------ - Updated code for PostgreSQL 6.2.1 and Python 1.5 - Reformatted code and converted to use full ANSI style prototypes - Changed name to PyGreSQL (from PyGres95) - Changed order of arguments to connect function - Created new type `pgqueryobject` and moved certain methods to it - Added a print function for pgqueryobject - Various code changes - mostly stylistic Version 1.0b (1995-11-04) ------------------------- - Keyword support for connect function moved from library file to C code and taken away from library - Rewrote documentation - Bug fix in connect function - Enhancements in large objects interface methods Version 1.0a (1995-10-30) ------------------------- A limited release. - Module adapted to standard Python syntax - Keyword support for connect function in library file - Rewrote default parameters interface (internal use of strings) - Fixed minor bugs in module interface - Redefinition of error messages Version 0.9b (1995-10-10) ------------------------- The first public release. - Large objects implementation - Many bug fixes, enhancements, ... Version 0.1a (1995-10-07) ------------------------- - Basic libpq functions (SQL access) pygresql-5.1.2/docs/contents/examples.rst000066400000000000000000000012121365010227600205200ustar00rootroot00000000000000Examples ======== I am starting to collect examples of applications that use PyGreSQL. So far I only have a few but if you have an example for me, you can either send me the files or the URL for me to point to. The :doc:`postgres/index` that is part of the PyGreSQL distribution shows some examples of using PostgreSQL with PyGreSQL. Here is a `list of motorcycle rides in Ontario `_ that uses a PostgreSQL database to store the rides. There is a link at the bottom of the page to view the source code. 
Oleg Broytmann has written a simple example `RGB database demo `_ pygresql-5.1.2/docs/contents/general.rst000066400000000000000000000036771365010227600203400ustar00rootroot00000000000000General PyGreSQL programming information ---------------------------------------- PyGreSQL consists of two parts: the "classic" PyGreSQL interface provided by the :mod:`pg` module and the newer DB-API 2.0 compliant interface provided by the :mod:`pgdb` module. If you use only the standard features of the DB-API 2.0 interface, it will be easier to switch from PostgreSQL to another database for which a DB-API 2.0 compliant interface exists. The "classic" interface may be easier to use for beginners, and it provides some higher-level and PostgreSQL specific convenience methods. .. seealso:: **DB-API 2.0** (Python Database API Specification v2.0) is a specification for connecting to databases (not only PostgreSQL) from Python that has been developed by the Python DB-SIG in 1999. The authoritative programming information for the DB-API is :pep:`0249`. Both Python modules utilize the same low-level C extension, which serves as a wrapper for the "libpq" library, the C API to PostgreSQL. This means you must have the libpq library installed as a shared library on your client computer, in a version that is supported by PyGreSQL. Depending on the client platform, you may have to set environment variables like `PATH` or `LD_LIBRARY_PATH` so that PyGreSQL can find the library. .. warning:: Note that PyGreSQL is not thread-safe on the connection level. Therefore we recommend using `DBUtils `_ for multi-threaded environments, which supports both PyGreSQL interfaces. Another option is using PyGreSQL indirectly as a database driver for the high-level `SQLAlchemy `_ SQL toolkit and ORM, which supports PyGreSQL starting with SQLAlchemy 1.1 and which provides a way to use PyGreSQL in a multi-threaded environment using the concept of "thread local storage". Database URLs for PyGreSQL take this form:: postgresql+pygresql://username:password@host:port/database pygresql-5.1.2/docs/contents/index.rst000066400000000000000000000010531365010227600200140ustar00rootroot00000000000000The PyGreSQL documentation ========================== Contents -------- .. toctree:: :maxdepth: 1 Installing PyGreSQL What's New and History of Changes General PyGreSQL Programming Information First Steps with PyGreSQL The Classic PyGreSQL Interface The DB-API Compliant Interface A PostgreSQL Primer Examples for using PyGreSQL Indices and tables ------------------ * :ref:`genindex` * :ref:`modindex` * :ref:`search` pygresql-5.1.2/docs/contents/install.rst000066400000000000000000000170711365010227600203620ustar00rootroot00000000000000Installation ============ General ------- You must first install Python and PostgreSQL on your system. If you want to access remote databases only, you don't need to install the full PostgreSQL server, but only the libpq C-interface library. If you are on Windows, make sure that the directory that contains libpq.dll is part of your ``PATH`` environment variable. The current version of PyGreSQL has been tested with Python versions 2.6, 2.7 and 3.3 to 3.8, and PostgreSQL versions 9.0 to 9.6 and 10 to 12. PyGreSQL will be installed as three modules, a shared library called _pg.so (on Linux) or a DLL called _pg.pyd (on Windows), and two pure Python wrapper modules called pg.py and pgdb.py. All three files will be installed directly into the Python site-packages directory. To uninstall PyGreSQL, simply remove these three files. 
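If you are unsure whether PyGreSQL is already installed, or want to verify after installing that the modules can be found, a quick import test helps (a minimal check; it assumes that the ``python`` on your path is the interpreter you installed PyGreSQL for, and that ``pg.version`` holds the version string of the classic module)::

    python -c "import pg, pgdb; print(pg.version)"

If this prints a version number without raising an ``ImportError``, all three modules are in place.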
Installing with Pip ------------------- This is the easiest way to install PyGreSQL if you have "pip" installed. Just run the following command in your terminal:: pip install PyGreSQL This will automatically try to find and download a distribution on the `Python Package Index `_ that matches your operating system and Python version and install it. Installing from a Binary Distribution ------------------------------------- If you don't want to use "pip", or "pip" doesn't find an appropriate distribution for your computer, you can also try to manually download and install a distribution. When you download the source distribution, you will need to compile the C extension, for which you need a C compiler installed. If you don't want to install a C compiler or want to avoid possible problems with the compilation, you can search for a pre-compiled binary distribution of PyGreSQL on the Python Package Index or the PyGreSQL homepage. You can currently download PyGreSQL as a Linux RPM, a NetBSD package or a Windows installer. Make sure the required Python version of the binary package matches the Python version you have installed. Install the package as usual on your system. Note that the documentation is currently only included in the source package. Installing from Source ---------------------- If you want to install PyGreSQL from source, or if there is no binary package available for your platform, follow these instructions. Make sure the Python header files and PostgreSQL client and server header files are installed. These usually come with the "devel" packages on Unix systems and the installer executables on Windows systems. If you are using a precompiled PostgreSQL, you will also need the pg_config tool. This is usually also part of the "devel" package on Unix, and will be installed as part of the database server feature on Windows systems. Building and installing with Distutils ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ You can build and install PyGreSQL using `Distutils `_. Download and unpack the PyGreSQL source tarball if you haven't already done so. Type the following command to build and install PyGreSQL:: python setup.py install Now you should be ready to use PyGreSQL. You can also run the build step separately if you want to create a distribution to be installed on a different system or explicitly enable or disable certain features. For instance, in order to build PyGreSQL without support for the SSL info functions, run:: python setup.py build_ext --no-ssl-info By default, PyGreSQL is compiled with support for all features available in the installed PostgreSQL version, and you will get warnings for the features that are not supported in this version. You can also explicitly require a feature in order to get an error if it is not available, for instance:: python setup.py build_ext --ssl-info You can find out all possible build options with:: python setup.py build_ext --help Alternatively, you can also use the corresponding C preprocessor macros like ``SSL_INFO`` directly (see the next section). Compiling Manually ~~~~~~~~~~~~~~~~~~ The source file for compiling the C extension module is pgmodule.c. You have two options. You can compile PyGreSQL as a stand-alone module or you can build it into the Python interpreter.
Stand-Alone ^^^^^^^^^^^ * In the directory containing ``pgmodule.c``, run the following command:: cc -fpic -shared -o _pg.so -I$PYINC -I$PGINC -I$PSINC -L$PGLIB -lpq pgmodule.c where you have to set:: PYINC = path to the Python include files (usually something like /usr/include/python) PGINC = path to the PostgreSQL client include files (something like /usr/include/pgsql or /usr/include/postgresql) PSINC = path to the PostgreSQL server include files (like /usr/include/pgsql/server or /usr/include/postgresql/server) PGLIB = path to the PostgreSQL object code libraries (usually /usr/lib) If you are not sure about the above paths, try something like:: PYINC=`find /usr -name Python.h` PGINC=`find /usr -name libpq-fe.h` PSINC=`find /usr -name postgres.h` PGLIB=`find /usr -name libpq.so` If you have the ``pg_config`` tool installed, you can set:: PGINC=`pg_config --includedir` PSINC=`pg_config --includedir-server` PGLIB=`pg_config --libdir` Some options may be added to this line:: -DDEFAULT_VARS default variables support -DDIRECT_ACCESS direct access methods -DLARGE_OBJECTS large object support -DESCAPING_FUNCS support for newer escaping functions -DSSL_INFO support SSL information On some systems you may need to include ``-lcrypt`` in the list of libraries to make it compile. * Test the new module. Something like the following should work:: $ python >>> import _pg >>> db = _pg.connect('thilo','localhost') >>> db.query("INSERT INTO test VALUES ('ping','pong')") 18304 >>> db.query("SELECT * FROM test") eins|zwei ----+---- ping|pong (1 row) * Finally, move the ``_pg.so``, ``pg.py``, and ``pgdb.py`` to a directory in your ``PYTHONPATH``. A good place would be ``/usr/lib/python/site-packages`` if your Python modules are in ``/usr/lib/python``. Built-in to Python interpreter ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ * Find the directory where your ``Setup`` file lives (usually in the ``Modules`` subdirectory) in the Python source hierarchy and copy or symlink the ``pgmodule.c`` file there. * Add the following line to your 'Setup' file:: _pg pgmodule.c -I$PGINC -I$PSINC -L$PGLIB -lpq where:: PGINC = path to the PostgreSQL client include files (see above) PSINC = path to the PostgreSQL server include files (see above) PGLIB = path to the PostgreSQL object code libraries (see above) Some options may be added to this line:: -DDEFAULT_VARS default variables support -DDIRECT_ACCESS direct access methods -DLARGE_OBJECTS large object support -DESCAPING_FUNCS support for newer escaping functions -DSSL_INFO support SSL information On some systems you may need to include ``-lcrypt`` in the list of libraries to make it compile. * If you want a shared module, make sure that the ``shared`` keyword is uncommented and add the above line below it. You used to need to install your shared modules with ``make sharedinstall`` but this no longer seems to be true. * Copy ``pg.py`` to the lib directory where the rest of your modules are. For example, that's ``/usr/local/lib/Python`` on my system. * Rebuild Python from the root directory of the Python source hierarchy by running ``make -f Makefile.pre.in boot`` and ``make && make install``. * For more details read the documentation at the top of ``Makefile.pre.in``. pygresql-5.1.2/docs/contents/pg/000077500000000000000000000000001365010227600165625ustar00rootroot00000000000000pygresql-5.1.2/docs/contents/pg/adaptation.rst000066400000000000000000000414401365010227600214430ustar00rootroot00000000000000Remarks on Adaptation and Typecasting ===================================== .. 
py:currentmodule:: pg Both PostgreSQL and Python have the concept of data types, but there are of course differences between the two type systems. Therefore PyGreSQL needs to adapt Python objects to the representation required by PostgreSQL when passing values as query parameters, and it needs to typecast the representation of PostgreSQL data types returned by database queries to Python objects. Here are some explanations about how this works in detail in case you want to better understand or change the default behavior of PyGreSQL. Supported data types -------------------- The following automatic data type conversions are supported by PyGreSQL out of the box. If you need other automatic type conversions or want to change the default conversions, you can achieve this by using the methods explained in the next two sections. ================================== ================== PostgreSQL Python ================================== ================== char, bpchar, name, text, varchar str bool bool bytea bytes int2, int4, int8, oid, serial int [#int8]_ int2vector list of int float4, float8 float numeric, money Decimal date datetime.date time, timetz datetime.time timestamp, timestamptz datetime.datetime interval datetime.timedelta hstore dict json, jsonb list or dict uuid uuid.UUID array list [#array]_ record tuple ================================== ================== .. note:: Elements of arrays and records will also be converted accordingly. .. [#int8] int8 is converted to long in Python 2 .. [#array] The first element of the array will always be the first element of the Python list, no matter what the lower bound of the PostgreSQL array is. The information about the start index of the array (which is usually 1 in PostgreSQL, but can also be different from 1) is ignored and gets lost in the conversion to the Python list. If you need that information, you can request it separately with the `array_lower()` function provided by PostgreSQL. Adaptation of parameters ------------------------ When you use the higher level methods of the classic :mod:`pg` module like :meth:`DB.insert()` or :meth:`DB.update()`, you don't need to care about adaptation of parameters, since all of this is happening automatically behind the scenes. You only need to consider this issue when creating SQL commands manually and sending them to the database using the :meth:`DB.query` method. Imagine you have created a user login form that stores the login name as *login* and the password as *passwd* and you now want to get the user data for that user. You may be tempted to execute a query like this:: >>> db = pg.DB(...) >>> sql = "SELECT * FROM user_table WHERE login = '%s' AND passwd = '%s'" >>> db.query(sql % (login, passwd)).getresult()[0] This seems to work at a first glance, but you will notice an error as soon as you try to use a login name containing a single quote. Even worse, this error can be exploited through so-called "SQL injection", where an attacker inserts malicious SQL statements into the query that you never intended to be executed. For instance, with a login name something like ``' OR ''='`` the attacker could easily log in and see the user data of another user in the database. One solution for this problem would be to cleanse your input of "dangerous" characters like the single quote, but this is tedious and it is likely that you overlook something or break the application e.g. for users with names like "D'Arcy". 
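To see the problem, look at what the naive string interpolation actually produces for such a name (a hypothetical illustration reusing the ``sql`` string from above)::

    >>> login, passwd = "D'Arcy", 'secret'
    >>> print(sql % (login, passwd))
    SELECT * FROM user_table WHERE login = 'D'Arcy' AND passwd = 'secret'

The quote inside ``D'Arcy`` terminates the SQL string literal early, so the query fails with a syntax error at best, and a maliciously crafted login name can change the meaning of the query entirely.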
A better solution is to use the escaping functions provided by PostgreSQL which are available as methods on the :class:`DB` object:: >>> login = "D'Arcy" >>> db.escape_string(login) "D''Arcy" As you see, :meth:`DB.escape_string` has doubled the single quote which is the right thing to do in SQL. However, there are better ways of passing parameters to the query, without having to manually escape them. If you pass the parameters as positional arguments to :meth:`DB.query`, then PyGreSQL will send them to the database separately, without the need for quoting them inside the SQL command, and without the problems inherent with that process. In this case you must put placeholders of the form ``$1``, ``$2`` etc. in the SQL command in place of the parameters that should go there. For instance:: >>> sql = "SELECT * FROM user_table WHERE login = $1 AND passwd = $2" >>> db.query(sql, login, passwd).getresult()[0] That's much better. So please always keep the following warning in mind: .. warning:: Remember to **never** insert parameters directly into your queries using the ``%`` operator. Always pass the parameters separately. If you like the ``%`` format specifications of Python better than the placeholders used by PostgreSQL, there is still a way to use them, via the :meth:`DB.query_formatted` method:: >>> sql = "SELECT * FROM user_table WHERE login = %s AND passwd = %s" >>> db.query_formatted(sql, (login, passwd)).getresult()[0] Note that we need to pass the parameters not as positional arguments here, but as a single tuple. Also note again that we did not use the ``%`` operator of Python to format the SQL string, we just used the ``%s`` format specifications of Python and let PyGreSQL care about the formatting. Even better, you can also pass the parameters as a dictionary if you use the :meth:`DB.query_formatted` method:: >>> sql = """SELECT * FROM user_table ... WHERE login = %(login)s AND passwd = %(passwd)s""" >>> parameters = dict(login=login, passwd=passwd) >>> db.query_formatted(sql, parameters).getresult()[0] Here is another example:: >>> sql = "SELECT 'Hello, ' || %s || '!'" >>> db.query_formatted(sql, (login,)).getresult()[0] You would think that the following even simpler example should work, too: >>> sql = "SELECT %s" >>> db.query_formatted(sql, (login,)).getresult()[0] ProgrammingError: Could not determine data type of parameter $1 The issue here is that :meth:`DB.query_formatted` by default still uses PostgreSQL parameters, transforming the Python style ``%s`` placeholder into a ``$1`` placeholder, and sending the login name separately from the query. In the query we looked at before, the concatenation with other strings made it clear that it should be interpreted as a string. This simple query however does not give PostgreSQL a clue what data type the ``$1`` placeholder stands for. This is different when you are embedding the login name directly into the query instead of passing it as parameter to PostgreSQL. 
You can achieve this by setting the *inline* parameter of :meth:`DB.query_formatted`, like so:: >>> sql = "SELECT %s" >>> db.query_formatted(sql, (login,), inline=True).getresult()[0] Another way of making this query work while still sending the parameters separately is to simply cast the parameter values:: >>> sql = "SELECT %s::text" >>> db.query_formatted(sql, (login,), inline=False).getresult()[0] In real world examples you will rarely have to cast your parameters like that, since in an INSERT statement or a WHERE clause comparing the parameter to a table column, the data type will be clear from the context. When binding the parameters to a query, PyGreSQL not only adapts the basic types like ``int``, ``float``, ``bool`` and ``str``, but also tries to make sense of Python lists and tuples. Lists are adapted as PostgreSQL arrays:: >>> params = dict(array=[[1, 2],[3, 4]]) >>> db.query_formatted("SELECT %(array)s::int[]", params).getresult()[0][0] [[1, 2], [3, 4]] Note that again we need to cast the array parameter or use inline parameters only because this simple query does not provide enough context. Also note that the query gives the value back as Python lists again. This is achieved by the typecasting mechanism explained in the next section. Tuples are adapted as PostgreSQL composite types. If you use inline parameters, they can also be used with the ``IN`` syntax. Let's think of a more real world example again where we create a table with a composite type in PostgreSQL: .. code-block:: sql CREATE TABLE on_hand ( item inventory_item, count integer) We assume the composite type ``inventory_item`` has been created like this: .. code-block:: sql CREATE TYPE inventory_item AS ( name text, supplier_id integer, price numeric) In Python we can use a named tuple as an equivalent to this PostgreSQL type:: >>> from collections import namedtuple >>> inventory_item = namedtuple( ... 'inventory_item', ['name', 'supplier_id', 'price']) Using the automatic adaptation of Python tuples, an item can now be inserted into the database and then read back as follows:: >>> db.query_formatted("INSERT INTO on_hand VALUES (%(item)s, %(count)s)", ... dict(item=inventory_item('fuzzy dice', 42, 1.99), count=1000)) >>> db.query("SELECT * FROM on_hand").getresult()[0][0] Row(item=inventory_item(name='fuzzy dice', supplier_id=42, price=Decimal('1.99')), count=1000) The :meth:`DB.insert` method provides a simpler way to achieve the same:: >>> row = dict(item=inventory_item('fuzzy dice', 42, 1.99), count=1000) >>> db.insert('on_hand', row) {'count': 1000, 'item': inventory_item(name='fuzzy dice', supplier_id=42, price=Decimal('1.99'))} Perhaps we want to use custom Python classes instead of named tuples to hold our values:: >>> class InventoryItem: ... ... def __init__(self, name, supplier_id, price): ... self.name = name ... self.supplier_id = supplier_id ... self.price = price ... ... def __str__(self): ... return '%s (from %s, at $%s)' % ( ... self.name, self.supplier_id, self.price) But when we try to insert an instance of this class in the same way, we will get an error. This is because PyGreSQL tries to pass the string representation of the object as a parameter to PostgreSQL, but this is just a human readable string and not useful for PostgreSQL to build a composite type. However, it is possible to make such custom classes adapt themselves to PostgreSQL by adding a "magic" method with the name ``__pg_str__``, like so:: >>> class InventoryItem: ... ... ... ... ... def __str__(self): ... 
return '%s (from %s, at $%s)' % ( ... self.name, self.supplier_id, self.price) ... ... def __pg_str__(self, typ): ... return (self.name, self.supplier_id, self.price) Now you can insert class instances the same way as you insert named tuples. You can even make these objects adapt to different types in different ways:: >>> class InventoryItem: ... ... ... ... ... def __pg_str__(self, typ): ... if typ == 'text': ... return str(self) ... return (self.name, self.supplier_id, self.price) ... >>> db.query("ALTER TABLE on_hand ADD COLUMN remark varchar") >>> item=InventoryItem('fuzzy dice', 42, 1.99) >>> row = dict(item=item, remark=item, count=1000) >>> db.insert('on_hand', row) {'count': 1000, 'item': inventory_item(name='fuzzy dice', supplier_id=42, price=Decimal('1.99')), 'remark': 'fuzzy dice (from 42, at $1.99)'} There is also another "magic" method ``__pg_repr__`` which does not take the *typ* parameter. That method is used instead of ``__pg_str__`` when passing parameters inline. You must be more careful when using ``__pg_repr__``, because it must return a properly escaped string that can be put literally inside the SQL. The only exception is when you return a tuple or list, because these will be adapted and properly escaped by PyGreSQL again. Typecasting to Python --------------------- As you noticed, PyGreSQL automatically converted the PostgreSQL data to suitable Python objects when returning values via the :meth:`DB.get()`, :meth:`Query.getresult()` and similar methods. This is done by the use of built-in typecast functions. If you want to use different typecast functions or add your own if no built-in typecast function is available, then this is possible using the :func:`set_typecast` function. With the :func:`get_typecast` function you can check which function is currently set. If no typecast function is set, then PyGreSQL will return the raw strings from the database. For instance, you will find that PyGreSQL uses the normal ``int`` function to cast PostgreSQL ``int4`` type values to Python:: >>> pg.get_typecast('int4') int In the classic PyGreSQL module, the typecasting for these basic types is always done internally by the C extension module for performance reasons. We can set a different typecast function for ``int4``, but it will not become effective, the C module continues to use its internal typecasting. However, we can add new typecast functions for the database types that are not supported by the C module. For example, we can create a typecast function that casts items of the composite PostgreSQL type used as example in the previous section to instances of the corresponding Python class. To do this, at first we get the default typecast function that PyGreSQL has created for the current :class:`DB` connection. This default function casts composite types to named tuples, as we have seen in the section before. 
We can grab it from the :attr:`DB.dbtypes` object as follows:: >>> cast_tuple = db.dbtypes.get_typecast('inventory_item') Now we can create a new typecast function that converts the tuple to an instance of our custom class:: >>> cast_item = lambda value: InventoryItem(*cast_tuple(value)) Finally, we set this typecast function, either globally with :func:`set_typecast`, or locally for the current connection like this:: >>> db.dbtypes.set_typecast('inventory_item', cast_item) Now we can get instances of our custom class directly from the database:: >>> item = db.query("SELECT * FROM on_hand").getresult()[0][0] >>> str(item) 'fuzzy dice (from 42, at $1.99)' Note that some of the typecast functions used by the C module are configurable with separate module level functions, such as :meth:`set_decimal`, :meth:`set_bool` or :meth:`set_jsondecode`. You need to use these instead of :meth:`set_typecast` if you want to change the behavior of the C module. Also note that after changing global typecast functions with :meth:`set_typecast`, you may need to run ``db.dbtypes.reset_typecast()`` to make these changes effective on connections that were already open. As one last example, let us try to typecast the geometric data type ``circle`` of PostgreSQL into a `SymPy `_ ``Circle`` object. Let's assume we have created and populated a table with two circles, like so: .. code-block:: sql CREATE TABLE circle ( name varchar(8) primary key, circle circle); INSERT INTO circle VALUES ('C1', '<(2, 3), 3>'); INSERT INTO circle VALUES ('C2', '<(1, -1), 4>'); With PostgreSQL we can easily calculate that these two circles overlap:: >>> q = db.query("""SELECT c1.circle && c2.circle ... FROM circle c1, circle c2 ... WHERE c1.name = 'C1' AND c2.name = 'C2'""") >>> q.getresult()[0][0] True However, calculating the intersection points between the two circles using the ``#`` operator does not work (at least not as of PostgreSQL version 12). So let's resort to SymPy to find out. To ease importing circles from PostgreSQL to SymPy, we create and register the following typecast function (note that the radius is split off at the last comma, so that the comma inside the point coordinates is not mistaken for the separator):: >>> from sympy import Point, Circle >>> >>> def cast_circle(s): ... p, r = s[1:-1].rsplit(',', 1) ... p = p[1:-1].split(',') ... return Circle(Point(float(p[0]), float(p[1])), float(r)) ... >>> pg.set_typecast('circle', cast_circle) Now we can import the circles in the table into Python simply using:: >>> circle = db.get_as_dict('circle', scalar=True) The result is a dictionary mapping circle names to SymPy ``Circle`` objects. We can verify that the circles have been imported correctly: >>> circle['C1'] Circle(Point(2, 3), 3.0) >>> circle['C2'] Circle(Point(1, -1), 4.0) Finally, we can find the exact intersection points with SymPy: >>> circle['C1'].intersection(circle['C2']) [Point(29/17 + 64564173230121*sqrt(17)/100000000000000, -80705216537651*sqrt(17)/500000000000000 + 31/17), Point(-64564173230121*sqrt(17)/100000000000000 + 29/17, 80705216537651*sqrt(17)/500000000000000 + 31/17)] pygresql-5.1.2/docs/contents/pg/connection.rst000066400000000000000000000475571365010227600214750ustar00rootroot00000000000000Connection -- The connection object =================================== .. py:currentmodule:: pg .. class:: Connection This object handles a connection to a PostgreSQL database. It embeds and hides all the parameters that define this connection, thus just leaving really significant parameters in function calls. .. note:: Some methods give direct access to the connection socket.
*Do not use them unless you really know what you are doing.* If you prefer disabling them, do not set the ``direct_access`` option in the Python setup file. These methods are specified by the tag [DA]. .. note:: Some other methods give access to large objects (refer to PostgreSQL user manual for more information about these). If you want to forbid access to these from the module, set the ``large_objects`` option in the Python setup file. These methods are specified by the tag [LO]. query -- execute a SQL command string ------------------------------------- .. method:: Connection.query(command, [args]) Execute a SQL command string :param str command: SQL command :param args: optional parameter values :returns: result values :rtype: :class:`Query`, None :raises TypeError: bad argument type, or too many arguments :raises TypeError: invalid connection :raises ValueError: empty SQL query or lost connection :raises pg.ProgrammingError: error in query :raises pg.InternalError: error during query processing This method simply sends a SQL query to the database. If the query is an insert statement that inserted exactly one row into a table that has OIDs, the return value is the OID of the newly inserted row as an integer. If the query is an update or delete statement, or an insert statement that did not insert exactly one row, or on a table without OIDs, then the number of rows affected is returned as a string. If it is a statement that returns rows as a result (usually a select statement, but maybe also an ``"insert/update ... returning"`` statement), this method returns a :class:`Query`. Otherwise, it returns ``None``. You can use the :class:`Query` object as an iterator that yields all results as tuples, or call :meth:`Query.getresult` to get the result as a list of tuples. Alternatively, you can call :meth:`Query.dictresult` or :meth:`Query.dictiter` if you want to get the rows as dictionaries, or :meth:`Query.namedresult` or :meth:`Query.namediter` if you want to get the rows as named tuples. You can also simply print the :class:`Query` object to show the query results on the console. The SQL command may optionally contain positional parameters of the form ``$1``, ``$2``, etc instead of literal data, in which case the values must be supplied separately as a tuple. The values are substituted by the database in such a way that they don't need to be escaped, making this an effective way to pass arbitrary or unknown data without worrying about SQL injection or syntax errors. If you don't pass any parameters, the command string can also include multiple SQL commands (separated by semicolons). You will only get the return value for the last command in this case. When the database could not process the query, a :exc:`pg.ProgrammingError` or a :exc:`pg.InternalError` is raised. You can check the ``SQLSTATE`` error code of this error by reading its :attr:`sqlstate` attribute. Example:: name = input("Name? ") phone = con.query("select phone from employees where name=$1", (name,)).getresult() query_prepared -- execute a prepared statement ---------------------------------------------- .. 
method:: Connection.query_prepared(name, [args]) Execute a prepared statement :param str name: name of the prepared statement :param args: optional parameter values :returns: result values :rtype: :class:`Query`, None :raises TypeError: bad argument type, or too many arguments :raises TypeError: invalid connection :raises ValueError: empty SQL query or lost connection :raises pg.ProgrammingError: error in query :raises pg.InternalError: error during query processing :raises pg.OperationalError: prepared statement does not exist This method works exactly like :meth:`Connection.query` except that instead of passing the command itself, you pass the name of a prepared statement. An empty name corresponds to the unnamed statement. You must have previously created the corresponding named or unnamed statement with :meth:`Connection.prepare`, or an :exc:`pg.OperationalError` will be raised. .. versionadded:: 5.1 prepare -- create a prepared statement -------------------------------------- .. method:: Connection.prepare(name, command) Create a prepared statement :param str name: name of the prepared statement :param str command: SQL command :rtype: None :raises TypeError: bad argument types, or wrong number of arguments :raises TypeError: invalid connection :raises pg.ProgrammingError: error in query or duplicate query This method creates a prepared statement with the specified name for the given command for later execution with the :meth:`Connection.query_prepared` method. The name can be empty to create an unnamed statement, in which case any pre-existing unnamed statement is automatically replaced; otherwise a :exc:`pg.ProgrammingError` is raised if the statement name is already defined in the current database session. The SQL command may optionally contain positional parameters of the form ``$1``, ``$2``, etc instead of literal data. The corresponding values must then later be passed to the :meth:`Connection.query_prepared` method separately as a tuple. .. versionadded:: 5.1 describe_prepared -- describe a prepared statement -------------------------------------------------- .. method:: Connection.describe_prepared(name) Describe a prepared statement :param str name: name of the prepared statement :rtype: :class:`Query` :raises TypeError: bad argument type, or too many arguments :raises TypeError: invalid connection :raises pg.OperationalError: prepared statement does not exist This method returns a :class:`Query` object describing the prepared statement with the given name. You can also pass an empty name in order to describe the unnamed statement. Information on the fields of the corresponding query can be obtained through the :meth:`Query.listfields`, :meth:`Query.fieldname` and :meth:`Query.fieldnum` methods. .. versionadded:: 5.1 reset -- reset the connection ----------------------------- .. method:: Connection.reset() Reset the :mod:`pg` connection :rtype: None :raises TypeError: too many (any) arguments :raises TypeError: invalid connection This method resets the current database connection. cancel -- abandon processing of current SQL command --------------------------------------------------- .. method:: Connection.cancel() :rtype: None :raises TypeError: too many (any) arguments :raises TypeError: invalid connection This method requests that the server abandon processing of the current SQL command. close -- close the database connection -------------------------------------- .. 
method:: Connection.close() Close the :mod:`pg` connection :rtype: None :raises TypeError: too many (any) arguments This method closes the database connection. The connection will be closed in any case when the connection is deleted but this allows you to explicitly close it. It is mainly here to allow the DB-SIG API wrapper to implement a close function. transaction -- get the current transaction state ------------------------------------------------ .. method:: Connection.transaction() Get the current in-transaction status of the server :returns: the current in-transaction status :rtype: int :raises TypeError: too many (any) arguments :raises TypeError: invalid connection The status returned by this method can be :const:`TRANS_IDLE` (currently idle), :const:`TRANS_ACTIVE` (a command is in progress), :const:`TRANS_INTRANS` (idle, in a valid transaction block), or :const:`TRANS_INERROR` (idle, in a failed transaction block). :const:`TRANS_UNKNOWN` is reported if the connection is bad. The status :const:`TRANS_ACTIVE` is reported only when a query has been sent to the server and not yet completed. parameter -- get a current server parameter setting --------------------------------------------------- .. method:: Connection.parameter(name) Look up a current parameter setting of the server :param str name: the name of the parameter to look up :returns: the current setting of the specified parameter :rtype: str or None :raises TypeError: too many (any) arguments :raises TypeError: invalid connection Certain parameter values are reported by the server automatically at connection startup or whenever their values change. This method can be used to interrogate these settings. It returns the current value of a parameter if known, or *None* if the parameter is not known. You can use this method to check the settings of important parameters such as `server_version`, `server_encoding`, `client_encoding`, `application_name`, `is_superuser`, `session_authorization`, `DateStyle`, `IntervalStyle`, `TimeZone`, `integer_datetimes`, and `standard_conforming_strings`. Values that are not reported by this method can be requested using :meth:`DB.get_parameter`. .. versionadded:: 4.0 date_format -- get the currently used date format ------------------------------------------------- .. method:: Connection.date_format() Look up the date format currently being used by the database :returns: the current date format :rtype: str :raises TypeError: too many (any) arguments :raises TypeError: invalid connection This method returns the current date format used by the server. Note that it is cheap to call this method, since there is no database query involved and the setting is also cached internally. You will need the date format when you want to manually typecast dates and timestamps coming from the database instead of using the built-in typecast functions. The date format returned by this method can be directly used with date formatting functions such as :meth:`datetime.strptime`. It is derived from the current setting of the database parameter ``DateStyle``. .. versionadded:: 5.0 fileno -- get the socket used to connect to the database -------------------------------------------------------- .. method:: Connection.fileno() Get the socket used to connect to the database :returns: the socket id of the database connection :rtype: int :raises TypeError: too many (any) arguments :raises TypeError: invalid connection This method returns the underlying socket id used to connect to the database. 
This is useful, for example, in select calls. getnotify -- get the last notify from the server ------------------------------------------------ .. method:: Connection.getnotify() Get the last notify from the server :returns: last notify from server :rtype: tuple, None :raises TypeError: too many parameters :raises TypeError: invalid connection This method tries to get a notify from the server (from the SQL statement NOTIFY). If the server returns no notify, the method returns None. Otherwise, it returns a tuple (triplet) *(relname, pid, extra)*, where *relname* is the name of the notify, *pid* is the process id of the connection that triggered the notify, and *extra* is a payload string that has been sent with the notification. Remember to do a listen query first, otherwise :meth:`Connection.getnotify` will always return ``None``. .. versionchanged:: 4.1 Support for payload strings was added in version 4.1. inserttable -- insert a list into a table ----------------------------------------- .. method:: Connection.inserttable(table, values) Insert a Python list into a database table :param str table: the table name :param list values: list of row values :rtype: None :raises TypeError: invalid connection, bad argument type, or too many arguments :raises MemoryError: insert buffer could not be allocated :raises ValueError: unsupported values This method allows you to *quickly* insert large blocks of data in a table: it inserts the whole values list into the given table. Internally, it uses the COPY command of the PostgreSQL database. The list is a list of tuples/lists that define the values for each inserted row. The row values may contain string, integer, long or double (real) values. .. warning:: This method doesn't type check the fields according to the table definition; it just looks whether or not it knows how to handle such types. get/set_cast_hook -- fallback typecast function ----------------------------------------------- .. method:: Connection.get_cast_hook() Get the function that handles all external typecasting :returns: the current external typecast function :rtype: callable, None :raises TypeError: too many (any) arguments This returns the callback function used by PyGreSQL to provide plug-in Python typecast functions for the connection. .. versionadded:: 5.0 .. method:: Connection.set_cast_hook(func) Set a function that will handle all external typecasting :param func: the function to be used as a callback :rtype: None :raises TypeError: the specified cast hook function is not callable This method allows setting a custom fallback function for providing Python typecast functions for the connection to supplement the C extension module. If you set this function to *None*, then only the typecast functions implemented in the C extension module are enabled. You normally would not want to change this. Instead, you can use :func:`get_typecast` and :func:`set_typecast` to add or change the plug-in Python typecast functions. .. versionadded:: 5.0 get/set_notice_receiver -- custom notice receiver ------------------------------------------------- .. method:: Connection.get_notice_receiver() Get the current notice receiver :returns: the current notice receiver callable :rtype: callable, None :raises TypeError: too many (any) arguments This method gets the custom notice receiver callback function that has been set with :meth:`Connection.set_notice_receiver`, or ``None`` if no custom notice receiver has ever been set on the connection. .. versionadded:: 4.1 ..
method:: Connection.set_notice_receiver(func) Set a custom notice receiver :param func: the custom notice receiver callback function :rtype: None :raises TypeError: the specified notice receiver is not callable This method allows setting a custom notice receiver callback function. When a notice or warning message is received from the server, or generated internally by libpq, and the message level is below the one set with ``client_min_messages``, the specified notice receiver function will be called. This function must take one parameter, the :class:`Notice` object, which provides the following read-only attributes: .. attribute:: Notice.pgcnx the connection .. attribute:: Notice.message the full message with a trailing newline .. attribute:: Notice.severity the level of the message, e.g. 'NOTICE' or 'WARNING' .. attribute:: Notice.primary the primary human-readable error message .. attribute:: Notice.detail an optional secondary error message .. attribute:: Notice.hint an optional suggestion what to do about the problem .. versionadded:: 4.1 putline -- write a line to the server socket [DA] ------------------------------------------------- .. method:: Connection.putline(line) Write a line to the server socket :param str line: line to be written :rtype: None :raises TypeError: invalid connection, bad parameter type, or too many parameters This method allows you to write a string directly to the server socket. getline -- get a line from server socket [DA] --------------------------------------------- .. method:: Connection.getline() Get a line from server socket :returns: the line read :rtype: str :raises TypeError: invalid connection :raises TypeError: too many parameters :raises MemoryError: buffer overflow This method allows you to read a string directly from the server socket. endcopy -- synchronize client and server [DA] --------------------------------------------- .. method:: Connection.endcopy() Synchronize client and server :rtype: None :raises TypeError: invalid connection :raises TypeError: too many parameters The use of direct access methods may desynchronize client and server. This method ensures that client and server are synchronized. locreate -- create a large object in the database [LO] ------------------------------------------------------ .. method:: Connection.locreate(mode) Create a large object in the database :param int mode: large object create mode :returns: object handling the PostgreSQL large object :rtype: :class:`LargeObject` :raises TypeError: invalid connection, bad parameter type, or too many parameters :raises pg.OperationalError: creation error This method creates a large object in the database. The mode can be defined by OR-ing the constants defined in the :mod:`pg` module (:const:`INV_READ`, :const:`INV_WRITE` and :const:`INV_ARCHIVE`). Please refer to the PostgreSQL user manual for a description of the mode values. getlo -- build a large object from given oid [LO] ------------------------------------------------- .. method:: Connection.getlo(oid) Build a large object from the given OID :param int oid: OID of the existing large object :returns: object handling the PostgreSQL large object :rtype: :class:`LargeObject` :raises TypeError: invalid connection, bad parameter type, or too many parameters :raises ValueError: bad OID value (0 is invalid_oid) This method allows reusing a previously created large object through the :class:`LargeObject` interface, provided the user has its OID.
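Putting the large object methods together, a typical round trip could look like this (a minimal sketch, assuming an open connection ``con`` and arbitrary sample data; the methods called on the returned object are documented in the :class:`LargeObject` chapter)::

    >>> lo = con.locreate(pg.INV_READ | pg.INV_WRITE)  # create a new large object
    >>> lo.open(pg.INV_WRITE)
    >>> lo.write(b'some binary data')
    >>> lo.close()
    >>> oid = lo.oid  # remember the OID so the object can be found again
    >>> lo = con.getlo(oid)  # rebuild a LargeObject from the OID
    >>> lo.open(pg.INV_READ)
    >>> lo.read(80)  # read back at most 80 bytes
    b'some binary data'
    >>> lo.close()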
loimport -- import a file to a large object [LO]
------------------------------------------------

.. method:: Connection.loimport(name)

    Import a file to a large object

    :param str name: the name of the file to be imported
    :returns: object handling the PostgreSQL large object
    :rtype: :class:`LargeObject`
    :raises TypeError: invalid connection, bad argument type, or too many arguments
    :raises pg.OperationalError: error during file import

This method allows you to create large objects in a very simple way.
You just give the name of a file containing the data to be used.

Object attributes
-----------------

Every :class:`Connection` defines a set of read-only attributes that
describe the connection and its status. These attributes are:

.. attribute:: Connection.host

    the host name of the server (str)

.. attribute:: Connection.port

    the port of the server (int)

.. attribute:: Connection.db

    the selected database (str)

.. attribute:: Connection.options

    the connection options (str)

.. attribute:: Connection.user

    user name on the database system (str)

.. attribute:: Connection.protocol_version

    the frontend/backend protocol being used (int)

    .. versionadded:: 4.0

.. attribute:: Connection.server_version

    the backend version (int, e.g. 90305 for 9.3.5)

    .. versionadded:: 4.0

.. attribute:: Connection.status

    the status of the connection (int: 1 = OK, 0 = bad)

.. attribute:: Connection.error

    the last warning/error message from the server (str)

.. attribute:: Connection.socket

    the file descriptor number of the connection socket to the server (int)

    .. versionadded:: 5.1

.. attribute:: Connection.backend_pid

    the PID of the backend process handling this connection (int)

    .. versionadded:: 5.1

.. attribute:: Connection.ssl_in_use

    this is True if the connection uses SSL, False if not

    .. versionadded:: 5.1 (needs PostgreSQL >= 9.5)

.. attribute:: Connection.ssl_attributes

    SSL-related information about the connection (dict)

    .. versionadded:: 5.1 (needs PostgreSQL >= 9.5)

pygresql-5.1.2/docs/contents/pg/db_types.rst000066400000000000000000000074161365010227600211330ustar00rootroot00000000000000
DbTypes -- The internal cache for database types
================================================

.. py:currentmodule:: pg

.. class:: DbTypes

.. versionadded:: 5.0

The :class:`DbTypes` object is essentially a dictionary mapping
PostgreSQL internal type names and type OIDs to PyGreSQL "type names"
(which are also returned by :meth:`DB.get_attnames` as dictionary values).

These type names are strings which are equal to either the simple
PyGreSQL names or to the more fine-grained registered PostgreSQL type
names if these have been enabled with :meth:`DB.use_regtypes`. Besides
being strings, they carry additional information about the associated
PostgreSQL type in the following attributes:

- *oid* -- the PostgreSQL type OID
- *pgtype* -- the internal PostgreSQL data type name
- *regtype* -- the registered PostgreSQL data type name
- *simple* -- the more coarse-grained PyGreSQL type name
- *typtype* -- `b` = base type, `c` = composite type etc.
- *category* -- `A` = Array, `b` = Boolean, `C` = Composite etc.
- *delim* -- delimiter for array types
- *relid* -- corresponding table for composite types
- *attnames* -- attributes for composite types

For details, see the PostgreSQL documentation on
`pg_type <https://www.postgresql.org/docs/current/catalog-pg-type.html>`_.

In addition to the dictionary methods, the :class:`DbTypes` class also
provides the following methods:
.. method:: DbTypes.get_attnames(typ)

    Get the names and types of the fields of composite types

    :param typ: PostgreSQL type name or OID of a composite type
    :type typ: str or int
    :returns: an ordered dictionary mapping field names to type names

.. method:: DbTypes.get_typecast(typ)

    Get the cast function for the given database type

    :param str typ: PostgreSQL type name
    :returns: the typecast function for the specified type
    :rtype: function or None

.. method:: DbTypes.set_typecast(typ, cast)

    Set a typecast function for the given database type(s)

    :param typ: PostgreSQL type name or list of type names
    :type typ: str or list
    :param cast: the typecast function to be set for the specified type(s)
    :type cast: callable or None

The typecast function must take one string object as argument and return
a Python object into which the PostgreSQL type shall be cast. If the
function takes another parameter named *connection*, then the current
database connection will also be passed to the typecast function. This
may sometimes be necessary to look up certain database settings.

.. method:: DbTypes.reset_typecast([typ])

    Reset the typecasts for the specified (or all) type(s) to their defaults

    :param typ: PostgreSQL type name or list of type names, or None to
        reset all typecast functions
    :type typ: str, list or None

.. method:: DbTypes.typecast(value, typ)

    Cast the given value according to the given database type

    :param str value: the value that shall be cast
    :param str typ: PostgreSQL type name or type code
    :returns: the cast value

.. note::

    Note that the :class:`DbTypes` object is always bound to a database
    connection. You can also get, set and reset typecast functions on a
    global level using the functions :func:`pg.get_typecast` and
    :func:`pg.set_typecast`. If you do this, the current database
    connections will continue to use their already cached typecast
    functions unless you reset the typecast functions by calling the
    :meth:`DbTypes.reset_typecast` method on :attr:`DB.dbtypes` objects
    of the running connections.

    Also note that the typecasting for all of the basic types happens
    already in the C low-level extension module. The typecast functions
    that can be set with the above methods are only called for the types
    that are not already supported by the C extension.

pygresql-5.1.2/docs/contents/pg/db_wrapper.rst000066400000000000000000001134411365010227600214450ustar00rootroot00000000000000
The DB wrapper class
====================

.. py:currentmodule:: pg

.. class:: DB

The :class:`Connection` methods are wrapped in the class :class:`DB`
which also adds convenient higher level methods for working with the
database. It also serves as a context manager for the connection.
The preferred way to use this module is as follows::

    import pg

    with pg.DB(...) as db:  # for parameters, see below
        for r in db.query(  # just for example
                "SELECT foo, bar FROM foo_bar_table WHERE foo !~ bar"
                ).dictresult():
            print('%(foo)s %(bar)s' % r)

This class can be subclassed as in this example::

    import pg

    class DB_ride(pg.DB):
        """Ride database wrapper

        This class encapsulates the database functions and the specific
        methods for the ride database."""

        def __init__(self):
            """Open a database connection to the rides database"""
            pg.DB.__init__(self, dbname='ride')
            self.query("SET DATESTYLE TO 'ISO'")

        [Add or override methods here]

The following describes the methods and variables of this class.

Initialization
--------------

The :class:`DB` class is initialized with the same arguments as the
:func:`connect` function described above. It also initializes a few
internal variables.
The statement ``db = DB()`` will open the local database with the name
of the user just like ``connect()`` does.

You can also initialize the DB class with an existing :mod:`pg` or
:mod:`pgdb` connection. Pass this connection as a single unnamed
parameter, or as a single parameter named ``db``. This allows you to use
all of the methods of the DB class with a DB-API 2 compliant connection.
Note that the :meth:`Connection.close` and :meth:`Connection.reopen`
methods are inoperative in this case.

pkey -- return the primary key of a table
-----------------------------------------

.. method:: DB.pkey(table, [composite])

    Return the primary key of a table

    :param str table: name of table
    :param bool composite: whether a tuple shall be returned even for single keys
    :returns: Name of the field which is the primary key of the table
    :rtype: str
    :raises KeyError: the table does not have a primary key

This method returns the primary key of a table. Single primary keys are
returned as strings unless you set the *composite* flag. Composite
primary keys are always represented as tuples. Note that this raises a
KeyError if the table does not have a primary key.

get_databases -- get list of databases in the system
----------------------------------------------------

.. method:: DB.get_databases()

    Get the list of databases in the system

    :returns: all databases in the system
    :rtype: list

Although you can do this with a simple select, it is added here for
convenience.

get_relations -- get list of relations in connected database
------------------------------------------------------------

.. method:: DB.get_relations([kinds], [system])

    Get the list of relations in connected database

    :param str kinds: a string or sequence of type letters
    :param bool system: whether system relations should be returned
    :returns: all relations of the given kinds in the database
    :rtype: list

This method returns the list of relations in the connected database.
Although you can do this with a simple select, it is added here for
convenience. You can select which kinds of relations you are interested
in by passing type letters in the `kinds` parameter. The type letters
are ``r`` = ordinary table, ``i`` = index, ``S`` = sequence, ``v`` =
view, ``c`` = composite type, ``s`` = special, ``t`` = TOAST table.
If `kinds` is None or an empty string, all relations are returned (this
is also the default). If `system` is set to `True`, then system tables
and views (temporary tables, toast tables, catalog views and tables)
will be returned as well, otherwise they will be ignored.

get_tables -- get list of tables in connected database
------------------------------------------------------

.. method:: DB.get_tables([system])

    Get the list of tables in connected database

    :param bool system: whether system tables should be returned
    :returns: all tables in connected database
    :rtype: list

This is a shortcut for ``get_relations('r', system)`` that has been
added for convenience.

get_attnames -- get the attribute names of a table
--------------------------------------------------

.. method:: DB.get_attnames(table)

    Get the attribute names of a table

    :param str table: name of table
    :returns: an ordered dictionary mapping attribute names to type names

Given the name of a table, digs out the set of attribute names. Returns
a read-only dictionary of attribute names (the names are the keys, the
values are the names of the attributes' types) with the column names in
the proper order if you iterate over it.

By default, only a limited number of simple types will be returned.
You can get the registered types instead, if enabled by calling the
:meth:`DB.use_regtypes` method.
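For instance, assuming a hypothetical ``employees`` table, the result
could look like this (a sketch, not actual output)::

    attrs = db.get_attnames('employees')
    # e.g. OrderedDict([('id', 'int'), ('name', 'text'), ('phone', 'text')])
    for name, typ in attrs.items():  # iterates in column order
        print(name, typ)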
has_table_privilege -- check table privilege
--------------------------------------------

.. method:: DB.has_table_privilege(table, privilege)

    Check whether current user has specified table privilege

    :param str table: the name of the table
    :param str privilege: privilege to be checked -- default is 'select'
    :returns: whether current user has specified table privilege
    :rtype: bool

Returns True if the current user has the specified privilege for the
table.

.. versionadded:: 4.0

get/set_parameter -- get or set run-time parameters
----------------------------------------------------

.. method:: DB.get_parameter(parameter)

    Get the value of run-time parameters

    :param parameter: the run-time parameter(s) to get
    :type parameter: str, tuple, list or dict
    :returns: the current value(s) of the run-time parameter(s)
    :rtype: str, list or dict
    :raises TypeError: Invalid parameter type(s)
    :raises pg.ProgrammingError: Invalid parameter name(s)

If the parameter is a string, the return value will also be a string
that is the current setting of the run-time parameter with that name.

You can get several parameters at once by passing a list, set or dict.
When passing a list of parameter names, the return value will be a
corresponding list of parameter settings. When passing a set of
parameter names, a new dict will be returned, mapping these parameter
names to their settings. Finally, if you pass a dict as parameter, its
values will be set to the current parameter settings corresponding to
its keys.

By passing the special name ``'all'`` as the parameter, you can get a
dict of all existing configuration parameters.

Note that you can request most of the important parameters also using
:meth:`Connection.parameter()` which does not involve a database query,
unlike :meth:`DB.get_parameter` and :meth:`DB.set_parameter`.

.. versionadded:: 4.2

.. method:: DB.set_parameter(parameter, [value], [local])

    Set the value of run-time parameters

    :param parameter: the run-time parameter(s) to set
    :type parameter: string, tuple, list or dict
    :param value: the value to set
    :type value: str or None
    :raises TypeError: Invalid parameter type(s)
    :raises ValueError: Invalid value argument(s)
    :raises pg.ProgrammingError: Invalid parameter name(s) or values

If the parameter and the value are strings, the run-time parameter will
be set to that value. If no value or *None* is passed as a value, then
the run-time parameter will be restored to its default value.

You can set several parameters at once by passing a list of parameter
names, together with a single value that all parameters should be set
to or with a corresponding list of values. You can also pass the
parameters as a set if you only provide a single value. Finally, you
can pass a dict with parameter names as keys. In this case, you should
not pass a value, since the values for the parameters will be taken
from the dict.

By passing the special name ``'all'`` as the parameter, you can reset
all existing settable run-time parameters to their default values.

If you set *local* to `True`, then the command takes effect for only
the current transaction. After :meth:`DB.commit` or :meth:`DB.rollback`,
the session-level setting takes effect again. Setting *local* to `True`
will appear to have no effect if it is executed outside a transaction,
since the transaction will end immediately.

.. versionadded:: 4.2
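For example, run-time parameters could be read and changed like this
(a sketch; the parameter names and values are only examples)::

    db.set_parameter('datestyle', 'ISO, YMD')
    db.get_parameter('datestyle')  # -> 'ISO, YMD'

    # set several parameters at once, using a dict
    db.set_parameter({'datestyle': 'ISO, YMD', 'extra_float_digits': '3'})

    # restore a parameter to its default value by omitting the value
    db.set_parameter('extra_float_digits')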
begin/commit/rollback/savepoint/release -- transaction handling
---------------------------------------------------------------

.. method:: DB.begin([mode])

    Begin a transaction

    :param str mode: an optional transaction mode such as 'READ ONLY'

This initiates a transaction block, that is, all following queries will
be executed in a single transaction until :meth:`DB.commit` or
:meth:`DB.rollback` is called.

.. versionadded:: 4.1

.. method:: DB.start()

    This is the same as the :meth:`DB.begin` method.

.. method:: DB.commit()

    Commit a transaction

This commits the current transaction.

.. method:: DB.end()

    This is the same as the :meth:`DB.commit` method.

.. versionadded:: 4.1

.. method:: DB.rollback([name])

    Roll back a transaction

    :param str name: optionally, roll back to the specified savepoint

This rolls back the current transaction, discarding all its changes.

.. method:: DB.abort()

    This is the same as the :meth:`DB.rollback` method.

.. versionadded:: 4.2

.. method:: DB.savepoint(name)

    Define a new savepoint

    :param str name: the name to give to the new savepoint

This establishes a new savepoint within the current transaction.

.. versionadded:: 4.1

.. method:: DB.release(name)

    Destroy a savepoint

    :param str name: the name of the savepoint to destroy

This destroys a savepoint previously defined in the current transaction.

.. versionadded:: 4.1

get -- get a row from a database table or view
----------------------------------------------

.. method:: DB.get(table, row, [keyname])

    Get a row from a database table or view

    :param str table: name of table or view
    :param row: either a dictionary or the value to be looked up
    :param str keyname: name of field to use as key (optional)
    :returns: A dictionary - the keys are the attribute names,
        the values are the row values.
    :raises pg.ProgrammingError: table has no primary key or missing privilege
    :raises KeyError: missing key value for the row

This method is the basic mechanism to get a single row. It assumes that
the *keyname* specifies a unique row. It must be the name of a single
column or a tuple of column names. If *keyname* is not specified, then
the primary key for the table is used.

If *row* is a dictionary, then the value for the key is taken from it.
Otherwise, the row must be a single value or a tuple of values
corresponding to the passed *keyname* or primary key. The fetched row
from the table will be returned as a new dictionary or used to replace
the existing values if the row was passed as a dictionary.

The OID is also put into the dictionary if the table has one, but in
order to allow the caller to work with multiple tables, it is munged as
``oid(table)`` using the actual name of the table.

Note that since PyGreSQL 5.0 this will return the value of an array
type column as a Python list by default.
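For example, with a hypothetical ``employees`` table that has the
primary key ``id`` and a ``name`` column, usage could look like this
(a sketch only)::

    emp = db.get('employees', 1234)  # look up by primary key
    emp = db.get('employees', 'John Doe', 'name')  # look up by another key
    emp['phone'] = '555-0123'
    db.update('employees', emp)  # the returned dict can be reused directly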
insert -- insert a row into a database table
--------------------------------------------

.. method:: DB.insert(table, [row], [col=val, ...])

    Insert a row into a database table

    :param str table: name of table
    :param dict row: optional dictionary of values
    :param col: optional keyword arguments for updating the dictionary
    :returns: the inserted values in the database
    :rtype: dict
    :raises pg.ProgrammingError: missing privilege or conflict

This method inserts a row into a table. If the optional dictionary is
not supplied then the required values must be included as keyword/value
pairs. If a dictionary is supplied then any keywords provided will be
added to or replace the entry in the dictionary.

The dictionary is then reloaded with the values actually inserted in
order to pick up values modified by rules, triggers, etc.

Note that since PyGreSQL 5.0 it is possible to insert a value for an
array type column by passing it as a Python list.

update -- update a row in a database table
------------------------------------------

.. method:: DB.update(table, [row], [col=val, ...])

    Update a row in a database table

    :param str table: name of table
    :param dict row: optional dictionary of values
    :param col: optional keyword arguments for updating the dictionary
    :returns: the new row in the database
    :rtype: dict
    :raises pg.ProgrammingError: table has no primary key or missing privilege
    :raises KeyError: missing key value for the row

Similar to insert, but updates an existing row. The update is based on
the primary key of the table or the OID value as munged by
:meth:`DB.get` or passed as keyword. The OID will take precedence if
provided, so that it is possible to update the primary key itself.

The dictionary is then modified to reflect any changes caused by the
update due to triggers, rules, default values, etc.

Like insert, the dictionary is optional and updates will be performed
on the fields in the keywords. There must be an OID or primary key
either specified using the ``'oid'`` keyword or in the dictionary, in
which case the OID must be munged.

upsert -- insert a row with conflict resolution
-----------------------------------------------

.. method:: DB.upsert(table, [row], [col=val, ...])

    Insert a row into a database table with conflict resolution

    :param str table: name of table
    :param dict row: optional dictionary of values
    :param col: optional keyword arguments for specifying the update
    :returns: the new row in the database
    :rtype: dict
    :raises pg.ProgrammingError: table has no primary key or missing privilege

This method inserts a row into a table, but instead of raising a
ProgrammingError exception in case of violating a constraint or unique
index, an update will be executed instead. This will be performed as a
single atomic operation on the database, so race conditions can be
avoided.

Like the insert method, the first parameter is the name of the table
and the second parameter can be used to pass the values to be inserted
as a dictionary.

Unlike the insert and update statements, keyword parameters are not
used to modify the dictionary, but to specify which columns shall be
updated in case of a conflict, and in which way:

A value of `False` or `None` means the column shall not be updated,
a value of `True` means the column shall be updated with the value that
has been proposed for insertion, i.e. has been passed as value in the
dictionary.

Columns that are not specified by keywords but appear as keys in the
dictionary are also updated like in the case keywords had been passed
with the value `True`.

So if in the case of a conflict you want to update every column that
has been passed in the dictionary `d`, you would call
``upsert(table, d)``. If you don't want to do anything in case of a
conflict, i.e. leave the existing row as it is, call
``upsert(table, d, **dict.fromkeys(d))``.

If you need more fine-grained control of what gets updated, you can
also pass strings in the keyword parameters. These strings will be used
as SQL expressions for the update columns.
In these expressions you can refer to the value that already exists in
the table by writing the table prefix ``included.`` before the column
name, and you can refer to the value that has been proposed for
insertion by writing ``excluded.`` as table prefix.

The dictionary is modified in any case to reflect the values in the
database after the operation has completed.

.. note::

    The method uses the PostgreSQL "upsert" feature which is only
    available since PostgreSQL 9.5. With older PostgreSQL versions, you
    will get a ProgrammingError if you use this method.

.. versionadded:: 5.0

query -- execute a SQL command string
-------------------------------------

.. method:: DB.query(command, [arg1, [arg2, ...]])

    Execute a SQL command string

    :param str command: SQL command
    :param arg*: optional positional arguments
    :returns: result values
    :rtype: :class:`Query`, None
    :raises TypeError: bad argument type, or too many arguments
    :raises TypeError: invalid connection
    :raises ValueError: empty SQL query or lost connection
    :raises pg.ProgrammingError: error in query
    :raises pg.InternalError: error during query processing

Similar to the :class:`Connection` function with the same name, except
that positional arguments can be passed either as a single list or
tuple, or as individual positional arguments. These arguments will then
be used as parameter values of parameterized queries.

Example::

    name = input("Name? ")
    phone = input("Phone? ")
    rows = db.query("update employees set phone=$2 where name=$1",
        name, phone)
    # or
    rows = db.query("update employees set phone=$2 where name=$1",
        (name, phone))

query_formatted -- execute a formatted SQL command string
---------------------------------------------------------

.. method:: DB.query_formatted(command, [parameters], [types], [inline])

    Execute a formatted SQL command string

    :param str command: SQL command
    :param parameters: the values of the parameters for the SQL command
    :type parameters: tuple, list or dict
    :param types: optionally, the types of the parameters
    :type types: tuple, list or dict
    :param bool inline: whether the parameters should be passed in the SQL
    :rtype: :class:`Query`, None
    :raises TypeError: bad argument type, or too many arguments
    :raises TypeError: invalid connection
    :raises ValueError: empty SQL query or lost connection
    :raises pg.ProgrammingError: error in query
    :raises pg.InternalError: error during query processing

Similar to :meth:`DB.query`, but using Python format placeholders of
the form ``%s`` or ``%(names)s`` instead of PostgreSQL placeholders of
the form ``$1``. The parameters must be passed as a tuple, list or
dict. You can also pass a corresponding tuple, list or dict of database
types in order to format the parameters properly in case there is
ambiguity.

If you set *inline* to True, the parameters will be sent to the
database embedded in the SQL command, otherwise they will be sent
separately. If you set *inline* to True or don't pass any parameters,
the command string can also include multiple SQL commands (separated by
semicolons). You will only get the result for the last command in this
case.

Note that the adaptation and conversion of the parameters causes a
certain performance overhead. Depending on the type of values, the
overhead can be smaller for *inline* queries or if you pass the types
of the parameters, so that they don't need to be guessed from the
values.
For best performance, we recommend using a raw :meth:`DB.query` or
:meth:`DB.query_prepared` if you are executing many of the same
operations with different parameters.

Example::

    name = input("Name? ")
    phone = input("Phone? ")
    rows = db.query_formatted(
        "update employees set phone=%s where name=%s",
        (phone, name))
    # or
    rows = db.query_formatted(
        "update employees set phone=%(phone)s where name=%(name)s",
        dict(name=name, phone=phone))

query_prepared -- execute a prepared statement
----------------------------------------------

.. method:: DB.query_prepared(name, [arg1, [arg2, ...]])

    Execute a prepared statement

    :param str name: name of the prepared statement
    :param arg*: optional positional arguments
    :returns: result values
    :rtype: :class:`Query`, None
    :raises TypeError: bad argument type, or too many arguments
    :raises TypeError: invalid connection
    :raises ValueError: empty SQL query or lost connection
    :raises pg.ProgrammingError: error in query
    :raises pg.InternalError: error during query processing
    :raises pg.OperationalError: prepared statement does not exist

This method works like the :meth:`DB.query` method, except that instead
of passing the SQL command, you pass the name of a prepared statement
created previously using the :meth:`DB.prepare` method.

Passing an empty string or *None* as the name will execute the unnamed
statement (see warning about the limited lifetime of the unnamed
statement in :meth:`DB.prepare`).

The functionality of this method is equivalent to that of the SQL
``EXECUTE`` command. Note that calling EXECUTE would require parameters
to be sent inline, and be properly sanitized (escaped, quoted).

.. versionadded:: 5.1

prepare -- create a prepared statement
--------------------------------------

.. method:: DB.prepare(name, command)

    Create a prepared statement

    :param str command: SQL command
    :param str name: name of the prepared statement
    :rtype: None
    :raises TypeError: bad argument types, or wrong number of arguments
    :raises TypeError: invalid connection
    :raises pg.ProgrammingError: error in query or duplicate query

This method creates a prepared statement with the specified name for
later execution of the given command with the :meth:`DB.query_prepared`
method.

If the name is empty or *None*, the unnamed prepared statement is used,
in which case any pre-existing unnamed statement is replaced.
Otherwise, if a prepared statement with the specified name is already
defined in the current database session, a :exc:`pg.ProgrammingError`
is raised.

The SQL command may optionally contain positional parameters of the
form ``$1``, ``$2``, etc instead of literal data. The corresponding
values must then be passed to the :meth:`Connection.query_prepared`
method as positional arguments.

The functionality of this method is equivalent to that of the SQL
``PREPARE`` command.

Example::

    db.prepare('change phone',
        "update employees set phone=$2 where ein=$1")
    while True:
        ein = input("Employee ID? ")
        if not ein:
            break
        phone = input("Phone? ")
        db.query_prepared('change phone', ein, phone)

.. note::

    We recommend always using named queries, since unnamed queries have
    a limited lifetime and can be automatically replaced or destroyed
    by various operations on the database.

.. versionadded:: 5.1

describe_prepared -- describe a prepared statement
--------------------------------------------------
.. method:: DB.describe_prepared([name])

    Describe a prepared statement

    :param str name: name of the prepared statement
    :rtype: :class:`Query`
    :raises TypeError: bad argument type, or too many arguments
    :raises TypeError: invalid connection
    :raises pg.OperationalError: prepared statement does not exist

This method returns a :class:`Query` object describing the prepared
statement with the given name. You can also pass an empty name in order
to describe the unnamed statement. Information on the fields of the
corresponding query can be obtained through the
:meth:`Query.listfields`, :meth:`Query.fieldname` and
:meth:`Query.fieldnum` methods.

.. versionadded:: 5.1

delete_prepared -- delete a prepared statement
----------------------------------------------

.. method:: DB.delete_prepared([name])

    Delete a prepared statement

    :param str name: name of the prepared statement
    :rtype: None
    :raises TypeError: bad argument type, or too many arguments
    :raises TypeError: invalid connection
    :raises pg.OperationalError: prepared statement does not exist

This method deallocates a previously prepared SQL statement with the
given name, or deallocates all prepared statements if you do not
specify a name. Note that prepared statements are always deallocated
automatically when the current session ends.

.. versionadded:: 5.1

clear -- clear row values in memory
-----------------------------------

.. method:: DB.clear(table, [row])

    Clear row values in memory

    :param str table: name of table
    :param dict row: optional dictionary of values
    :returns: an empty row
    :rtype: dict

This method clears all the attributes to values determined by the
types. Numeric types are set to 0, Booleans are set to *False*, and
everything else is set to the empty string. If the row argument is
present, it is used as the row dictionary and any entries matching
attribute names are cleared with everything else left unchanged. If the
dictionary is not supplied a new one is created.

delete -- delete a row from a database table
--------------------------------------------

.. method:: DB.delete(table, [row], [col=val, ...])

    Delete a row from a database table

    :param str table: name of table
    :param dict row: optional dictionary of values
    :param col: optional keyword arguments for updating the dictionary
    :rtype: int
    :raises pg.ProgrammingError: table has no primary key,
        row is still referenced or missing privilege
    :raises KeyError: missing key value for the row

This method deletes the row from a table. It deletes based on the
primary key of the table or the OID value as munged by :meth:`DB.get`
or passed as keyword. The OID will take precedence if provided.

The return value is the number of deleted rows (i.e. 0 if the row did
not exist and 1 if the row was deleted).

Note that if the row cannot be deleted because e.g. it is still
referenced by another table, this method will raise a ProgrammingError.

truncate -- quickly empty database tables
-----------------------------------------

.. method:: DB.truncate(table, [restart], [cascade], [only])

    Empty a table or set of tables

    :param table: the name of the table(s)
    :type table: str, list or set
    :param bool restart: whether table sequences should be restarted
    :param bool cascade: whether referenced tables should also be truncated
    :param only: whether only parent tables should be truncated
    :type only: bool or list

This method quickly removes all rows from the given table or set of
tables. It has the same effect as an unqualified DELETE on each table,
but since it does not actually scan the tables it is faster.
Furthermore, it reclaims disk space immediately, rather than requiring
a subsequent VACUUM operation. This is most useful on large tables.

If *restart* is set to `True`, sequences owned by columns of the
truncated table(s) are automatically restarted. If *cascade* is set to
`True`, it also truncates all tables that have foreign-key references
to any of the named tables. If the parameter *only* is not set to
`True`, all the descendant tables (if any) will also be truncated.
Optionally, a ``*`` can be specified after the table name to explicitly
indicate that descendant tables are included. If the parameter *table*
is a list, the parameter *only* can also be a list of corresponding
boolean values.

.. versionadded:: 4.2

get_as_list/dict -- read a table as a list or dictionary
--------------------------------------------------------

.. method:: DB.get_as_list(table, [what], [where], [order], [limit], [offset], [scalar])

    Get a table as a list

    :param str table: the name of the table (the FROM clause)
    :param what: column(s) to be returned (the SELECT clause)
    :type what: str, list, tuple or None
    :param where: condition(s) to be fulfilled (the WHERE clause)
    :type where: str, list, tuple or None
    :param order: column(s) to sort by (the ORDER BY clause)
    :type order: str, list, tuple, False or None
    :param int limit: maximum number of rows returned (the LIMIT clause)
    :param int offset: number of rows to be skipped (the OFFSET clause)
    :param bool scalar: whether only the first column shall be returned
    :returns: the content of the table as a list
    :rtype: list
    :raises TypeError: the table name has not been specified

This gets a convenient representation of the table as a list of named
tuples in Python. You only need to pass the name of the table (or any
other SQL expression returning rows). Note that by default this will
return the full content of the table which can be huge and overflow
your memory. However, you can control the amount of data returned using
the other optional parameters.

The parameter *what* can restrict the query to only return a subset of
the table columns. The parameter *where* can restrict the query to only
return a subset of the table rows. The specified SQL expressions all
need to be fulfilled for a row to get into the result. The parameter
*order* specifies the ordering of the rows. If no ordering is
specified, the result will be ordered by the primary key(s) or all
columns if no primary key exists. You can set *order* to *False* if you
don't care about the ordering. The parameters *limit* and *offset*
specify the maximum number of rows returned and a number of rows
skipped over.

If you set the *scalar* option to *True*, then instead of the named
tuples you will get the first items of these tuples. This is useful if
the result has only one column anyway.

.. versionadded:: 5.0
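A couple of sketched calls, assuming a hypothetical ``employees`` table
with ``name`` and ``dept`` columns::

    employees = db.get_as_list('employees')  # whole table, default order
    names = db.get_as_list('employees',
        what='name', where="dept = 'sales'",
        order='name', limit=10, scalar=True)  # first 10 names as strings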
.. method:: DB.get_as_dict(table, [keyname], [what], [where], [order], [limit], [offset], [scalar])

    Get a table as a dictionary

    :param str table: the name of the table (the FROM clause)
    :param keyname: column(s) to be used as key(s) of the dictionary
    :type keyname: str, list, tuple or None
    :param what: column(s) to be returned (the SELECT clause)
    :type what: str, list, tuple or None
    :param where: condition(s) to be fulfilled (the WHERE clause)
    :type where: str, list, tuple or None
    :param order: column(s) to sort by (the ORDER BY clause)
    :type order: str, list, tuple, False or None
    :param int limit: maximum number of rows returned (the LIMIT clause)
    :param int offset: number of rows to be skipped (the OFFSET clause)
    :param bool scalar: whether only the first column shall be returned
    :returns: the content of the table as a dictionary
    :rtype: dict or OrderedDict
    :raises TypeError: the table name has not been specified
    :raises KeyError: keyname(s) are invalid or not part of the result
    :raises pg.ProgrammingError: no keyname(s) and table has no primary key

This method is similar to :meth:`DB.get_as_list`, but returns the table
as a Python dict instead of a Python list, which can be even more
convenient. The primary key column(s) of the table will be used as the
keys of the dictionary, while the other column(s) will be the
corresponding values. The keys will be named tuples if the table has a
composite primary key. The rows will be also named tuples unless the
*scalar* option has been set to *True*. With the optional parameter
*keyname* you can specify a different set of columns to be used as the
keys of the dictionary.

If the Python version supports it, the dictionary will be an
*OrderedDict* using the order specified with the *order* parameter or
the key column(s) if not specified. You can set *order* to *False* if
you don't care about the ordering. In this case the returned dictionary
will be an ordinary one.

.. versionadded:: 5.0

escape_literal/identifier/string/bytea -- escape for SQL
--------------------------------------------------------

The following methods escape text or binary strings so that they can be
inserted directly into an SQL command. Except for
:meth:`DB.escape_bytea`, you don't need to call these methods for the
strings passed as parameters to :meth:`DB.query`. You also don't need
to call any of these methods when storing data using :meth:`DB.insert`
and similar.

.. method:: DB.escape_literal(string)

    Escape a string for use within SQL as a literal constant

    :param str string: the string that is to be escaped
    :returns: the escaped string
    :rtype: str

This method escapes a string for use within an SQL command. This is
useful when inserting data values as literal constants in SQL commands.
Certain characters (such as quotes and backslashes) must be escaped to
prevent them from being interpreted specially by the SQL parser.

.. versionadded:: 4.1

.. method:: DB.escape_identifier(string)

    Escape a string for use within SQL as an identifier

    :param str string: the string that is to be escaped
    :returns: the escaped string
    :rtype: str

This method escapes a string for use as an SQL identifier, such as a
table, column, or function name. This is useful when a user-supplied
identifier might contain special characters that would otherwise be
misinterpreted by the SQL parser, or when the identifier might contain
upper case characters whose case should be preserved.

.. versionadded:: 4.1
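For instance, a query with a user-supplied table name and value could
be built safely like this (a sketch; ``tablename`` and ``city`` are
hypothetical variables holding untrusted input)::

    sql = 'select count(*) from %s where city = %s' % (
        db.escape_identifier(tablename),  # quoted as an identifier
        db.escape_literal(city))  # quoted as a literal constant
    n = db.query(sql).getresult()[0][0]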
.. method:: DB.escape_string(string)

    Escape a string for use within SQL

    :param str string: the string that is to be escaped
    :returns: the escaped string
    :rtype: str

Similar to the module function :func:`pg.escape_string` with the same
name, but the behavior of this method is adjusted depending on the
connection properties (such as character encoding).

.. method:: DB.escape_bytea(datastring)

    Escape binary data for use within SQL as type ``bytea``

    :param str datastring: string containing the binary data that is to be escaped
    :returns: the escaped string
    :rtype: str

Similar to the module function :func:`pg.escape_bytea` with the same
name, but the behavior of this method is adjusted depending on the
connection properties (in particular, whether standard-conforming
strings are enabled).

unescape_bytea -- unescape data retrieved from the database
-----------------------------------------------------------

.. method:: DB.unescape_bytea(string)

    Unescape ``bytea`` data that has been retrieved as text

    :param str string: the ``bytea`` data string that has been retrieved as text
    :returns: byte string containing the binary data
    :rtype: bytes

Converts an escaped string representation of binary data stored as
``bytea`` into the raw byte string representing the binary data --
this is the reverse of :meth:`DB.escape_bytea`. Since the
:class:`Query` results will already return unescaped byte strings, you
normally don't have to use this method.

encode/decode_json -- encode and decode JSON data
-------------------------------------------------

The following methods can be used to encode and decode data in
`JSON <http://www.json.org/>`_ format.

.. method:: DB.encode_json(obj)

    Encode a Python object for use within SQL as type ``json`` or ``jsonb``

    :param obj: Python object that shall be encoded to JSON format
    :type obj: dict, list or None
    :returns: string representation of the Python object in JSON format
    :rtype: str

This method serializes a Python object into a JSON formatted string
that can be used within SQL. You don't need to use this method on the
data stored with :meth:`DB.insert` and similar, only if you store the
data directly as part of an SQL command or parameter with
:meth:`DB.query`. This is the same as the :func:`json.dumps` function
from the standard library.

.. versionadded:: 5.0

.. method:: DB.decode_json(string)

    Decode ``json`` or ``jsonb`` data that has been retrieved as text

    :param string: JSON formatted string shall be decoded into a Python object
    :type string: str
    :returns: Python object representing the JSON formatted string
    :rtype: dict, list or None

This method deserializes a JSON formatted string retrieved as text from
the database to a Python object. You normally don't need to use this
method as JSON data is automatically decoded by PyGreSQL. If you don't
want the data to be decoded, then you can cast ``json`` or ``jsonb``
columns to ``text`` in PostgreSQL or you can set the decoding function
to *None* or a different function using :func:`pg.set_jsondecode`. By
default this is the same as the :func:`json.loads` function from the
standard library.

.. versionadded:: 5.0
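As a rough sketch (assuming a hypothetical table ``test`` with a
``jsonb`` column ``data``; the server resolves the parameter against
the column type)::

    obj = {'name': 'Garfield', 'weight': 17.5}
    db.query('insert into test (data) values ($1)',
        (db.encode_json(obj),))
    row = db.query('select data from test').getresult()[0]
    data = row[0]  # already decoded to a Python dict by PyGreSQL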
use_regtypes -- choose usage of registered type names
-----------------------------------------------------

.. method:: DB.use_regtypes([regtypes])

    Determine whether registered type names shall be used

    :param bool regtypes: if passed, set whether registered type names
        shall be used
    :returns: whether registered type names are used

The :meth:`DB.get_attnames` method can return either simplified
"classic" type names (the default) or more fine-grained "registered"
type names. Which kind of type names is used can be changed by calling
:meth:`DB.use_regtypes`. If you pass a boolean, it sets whether
registered type names shall be used. The method can also be used to
check through its return value whether registered type names are
currently used.

.. versionadded:: 4.1

notification_handler -- create a notification handler
-----------------------------------------------------

.. class:: DB.notification_handler(event, callback, [arg_dict], [timeout], [stop_event])

    Create a notification handler instance

    :param str event: the name of an event to listen for
    :param callback: a callback function
    :param dict arg_dict: an optional dictionary for passing arguments
    :param timeout: the time-out when waiting for notifications
    :type timeout: int, float or None
    :param str stop_event: an optional different name to be used as stop event

This method creates a :class:`pg.NotificationHandler` object using the
:class:`DB` connection as explained under :doc:`notification`.

.. versionadded:: 4.1.1

Attributes of the DB wrapper class
----------------------------------

.. attribute:: DB.db

    The wrapped :class:`Connection` object

You normally don't need this, since all of the members can be accessed
from the :class:`DB` wrapper class as well.

.. attribute:: DB.dbname

    The name of the database that the connection is using

.. attribute:: DB.dbtypes

    A dictionary with the various type names for the PostgreSQL types

This can be used for getting more information on the PostgreSQL
database types or changing the typecast functions used for the
connection. See the description of the :class:`DbTypes` class for
details.

.. versionadded:: 5.0

.. attribute:: DB.adapter

    A class with some helper functions for adapting parameters

This can be used for building queries with parameters. You normally
will not need this, as you can use the :class:`DB.query_formatted`
method.

.. versionadded:: 5.0

pygresql-5.1.2/docs/contents/pg/index.rst000066400000000000000000000004731365010227600204270ustar00rootroot00000000000000
--------------------------------------------
:mod:`pg` --- The Classic PyGreSQL Interface
--------------------------------------------

.. module:: pg

Contents
========

.. toctree::

    introduction
    module
    connection
    db_wrapper
    query
    large_objects
    notification
    db_types
    adaptation

pygresql-5.1.2/docs/contents/pg/introduction.rst000066400000000000000000000014751365010227600220440ustar00rootroot00000000000000
Introduction
============

You may either choose to use the "classic" PyGreSQL interface provided
by the :mod:`pg` module or else the newer DB-API 2.0 compliant interface
provided by the :mod:`pgdb` module.

The following part of the documentation covers only the older :mod:`pg`
API.

The :mod:`pg` module handles three types of objects,

- the :class:`Connection` instances, which handle the connection
  and all the requests to the database,
- the :class:`LargeObject` instances, which handle
  all the accesses to PostgreSQL large objects,
- the :class:`Query` instances that handle query results

and it provides a convenient wrapper class :class:`DB` for the basic
:class:`Connection` class.
.. seealso::

    If you want to see a simple example of the use of some of these
    functions, see the :doc:`../examples` page.

pygresql-5.1.2/docs/contents/pg/large_objects.rst000066400000000000000000000143541365010227600221260ustar00rootroot00000000000000
LargeObject -- Large Objects
============================

.. py:currentmodule:: pg

.. class:: LargeObject

Objects that are instances of the class :class:`LargeObject` are used
to handle all the requests concerning a PostgreSQL large object. These
objects embed and hide all the "recurrent" variables (object OID and
connection), exactly in the same way :class:`Connection` instances do,
thus only keeping significant parameters in function calls. The
:class:`LargeObject` instance keeps a reference to the
:class:`Connection` object used for its creation, sending requests
through it with its parameters. Any modification other than
dereferencing the :class:`Connection` object will thus affect the
:class:`LargeObject` instance. Dereferencing the initial
:class:`Connection` object is not a problem since Python won't
deallocate it before the :class:`LargeObject` instance dereferences it.
All functions return a generic error message on call error, whatever
the exact error was. The :attr:`error` attribute of the object allows
you to get the exact error message.

See also the PostgreSQL programmer's guide for more information about
the large object interface.

open -- open a large object
---------------------------

.. method:: LargeObject.open(mode)

    Open a large object

    :param int mode: open mode definition
    :rtype: None
    :raises TypeError: invalid connection, bad parameter type, or too many parameters
    :raises IOError: already opened object, or open error

This method opens a large object for reading/writing, in the same way
as the Unix open() function does. The mode value can be obtained by
OR-ing the constants defined in the :mod:`pg` module (:const:`INV_READ`,
:const:`INV_WRITE`).

close -- close a large object
-----------------------------

.. method:: LargeObject.close()

    Close a large object

    :rtype: None
    :raises TypeError: invalid connection
    :raises TypeError: too many parameters
    :raises IOError: object is not opened, or close error

This method closes a previously opened large object, in the same way as
the Unix close() function.

read, write, tell, seek, unlink -- file-like large object handling
------------------------------------------------------------------

.. method:: LargeObject.read(size)

    Read data from large object

    :param int size: maximal size of the buffer to be read
    :returns: the read buffer
    :rtype: bytes
    :raises TypeError: invalid connection, invalid object,
        bad parameter type, or too many parameters
    :raises ValueError: if `size` is negative
    :raises IOError: object is not opened, or read error

This function allows you to read data from a large object, starting at
the current position.

.. method:: LargeObject.write(string)

    Write data to large object

    :param bytes string: string buffer to be written
    :rtype: None
    :raises TypeError: invalid connection, bad parameter type, or too many parameters
    :raises IOError: object is not opened, or write error

This function allows you to write data to a large object, starting at
the current position.
.. method:: LargeObject.seek(offset, whence)

    Change current position in large object

    :param int offset: position offset
    :param int whence: positional parameter
    :returns: new position in object
    :rtype: int
    :raises TypeError: invalid connection or invalid object,
        bad parameter type, or too many parameters
    :raises IOError: object is not opened, or seek error

This method allows you to move the position cursor in the large object.
The valid values for the whence parameter are defined as constants in
the :mod:`pg` module (:const:`SEEK_SET`, :const:`SEEK_CUR`,
:const:`SEEK_END`).

.. method:: LargeObject.tell()

    Return current position in large object

    :returns: current position in large object
    :rtype: int
    :raises TypeError: invalid connection or invalid object
    :raises TypeError: too many parameters
    :raises IOError: object is not opened, or seek error

This method allows you to get the current position in the large object.

.. method:: LargeObject.unlink()

    Delete large object

    :rtype: None
    :raises TypeError: invalid connection or invalid object
    :raises TypeError: too many parameters
    :raises IOError: object is not closed, or unlink error

This method unlinks (deletes) the PostgreSQL large object.

size -- get the large object size
---------------------------------

.. method:: LargeObject.size()

    Return the large object size

    :returns: the large object size
    :rtype: int
    :raises TypeError: invalid connection or invalid object
    :raises TypeError: too many parameters
    :raises IOError: object is not opened, or seek/tell error

This (composite) method allows you to get the size of a large object.
It was implemented because this function is very useful for a web
interfaced database. Currently, the large object needs to be opened
first.

export -- save a large object to a file
---------------------------------------

.. method:: LargeObject.export(name)

    Export a large object to a file

    :param str name: file to be created
    :rtype: None
    :raises TypeError: invalid connection or invalid object,
        bad parameter type, or too many parameters
    :raises IOError: object is not closed, or export error

This method allows you to dump the content of a large object in a very
simple way. The exported file is created on the host of the program,
not the server host.

Object attributes
-----------------

:class:`LargeObject` objects define a read-only set of attributes that
allow you to get some information about it. These attributes are:

.. attribute:: LargeObject.oid

    the OID associated with the large object (int)

.. attribute:: LargeObject.pgcnx

    the :class:`Connection` object associated with the large object

.. attribute:: LargeObject.error

    the last warning/error message of the connection (str)

.. warning::

    In multi-threaded environments, :attr:`LargeObject.error` may be
    modified by another thread using the same :class:`Connection`.
    Remember these objects are shared, not duplicated. You should
    provide some locking if you want to check this.

The :attr:`LargeObject.oid` attribute is very interesting, because it
allows you to reuse the OID later, creating the :class:`LargeObject`
object with a :meth:`Connection.getlo` method call.

pygresql-5.1.2/docs/contents/pg/module.rst000066400000000000000000000670631365010227600206130ustar00rootroot00000000000000
Module functions and constants
==============================

.. py:currentmodule:: pg

The :mod:`pg` module defines a few functions that allow you to connect
to a database and to define "default variables" that override the
environment variables used by PostgreSQL.
These "default variables" were designed to allow you to handle general
connection parameters without heavy code in your programs. You can
prompt the user for a value, put it in the default variable, and forget
it, without having to modify your environment. The support for default
variables can be disabled by not setting the ``default_vars`` option in
the Python setup file. Methods relative to this are specified by the
tag [DV].

All variables are set to ``None`` at module initialization, specifying
that standard environment variables should be used.

connect -- Open a PostgreSQL connection
---------------------------------------

.. function:: connect([dbname], [host], [port], [opt], [user], [passwd])

    Open a :mod:`pg` connection

    :param dbname: name of connected database (*None* = :data:`defbase`)
    :type dbname: str or None
    :param host: name of the server host (*None* = :data:`defhost`)
    :type host: str or None
    :param port: port used by the database server (-1 = :data:`defport`)
    :type port: int
    :param opt: connection options (*None* = :data:`defopt`)
    :type opt: str or None
    :param user: PostgreSQL user (*None* = :data:`defuser`)
    :type user: str or None
    :param passwd: password for user (*None* = :data:`defpasswd`)
    :type passwd: str or None
    :returns: If successful, the :class:`Connection` handling the connection
    :rtype: :class:`Connection`
    :raises TypeError: bad argument type, or too many arguments
    :raises SyntaxError: duplicate argument definition
    :raises pg.InternalError: some error occurred during pg connection definition
    :raises Exception: (all exceptions relative to object allocation)

This function opens a connection to a specified database on a given
PostgreSQL server. You can use keywords here, as described in the
Python tutorial. The names of the keywords are the name of the
parameters given in the syntax line. The ``opt`` parameter can be used
to pass command-line options to the server. For a precise description
of the parameters, please refer to the PostgreSQL user manual.

If you want to add additional parameters not specified here, you must
pass a connection string or a connection URI instead of the ``dbname``
(as in ``con3`` and ``con4`` in the following example).

Example::

    import pg

    con1 = pg.connect('testdb', 'myhost', 5432, None, 'bob', None)
    con2 = pg.connect(dbname='testdb', host='myhost', user='bob')
    con3 = pg.connect('host=myhost user=bob dbname=testdb connect_timeout=10')
    con4 = pg.connect('postgresql://bob@myhost/testdb?connect_timeout=10')

get/set_defhost -- default server host [DV]
-------------------------------------------

.. function:: get_defhost()

    Get the default host

    :returns: the current default host specification
    :rtype: str or None
    :raises TypeError: too many arguments

This method returns the current default host specification, or ``None``
if the environment variables should be used. Environment variables
won't be looked up.

.. function:: set_defhost(host)

    Set the default host

    :param host: the new default host specification
    :type host: str or None
    :returns: the previous default host specification
    :rtype: str or None
    :raises TypeError: bad argument type, or too many arguments

This method sets the default host value for new connections. If
``None`` is supplied as parameter, environment variables will be used
in future connections. It returns the previous setting for default
host.

get/set_defport -- default server port [DV]
-------------------------------------------
.. function:: get_defport()

    Get the default port

    :returns: the current default port specification
    :rtype: int
    :raises TypeError: too many arguments

This method returns the current default port specification, or ``None``
if the environment variables should be used. Environment variables
won't be looked up.

.. function:: set_defport(port)

    Set the default port

    :param port: the new default port
    :type port: int
    :returns: previous default port specification
    :rtype: int or None

This method sets the default port value for new connections. If -1 is
supplied as parameter, environment variables will be used in future
connections. It returns the previous setting for default port.

get/set_defopt -- default connection options [DV]
--------------------------------------------------

.. function:: get_defopt()

    Get the default connection options

    :returns: the current default options specification
    :rtype: str or None
    :raises TypeError: too many arguments

This method returns the current default connection options
specification, or ``None`` if the environment variables should be used.
Environment variables won't be looked up.

.. function:: set_defopt(options)

    Set the default connection options

    :param options: the new default connection options
    :type options: str or None
    :returns: previous default options specification
    :rtype: str or None
    :raises TypeError: bad argument type, or too many arguments

This method sets the default connection options value for new
connections. If ``None`` is supplied as parameter, environment
variables will be used in future connections. It returns the previous
setting for default options.

get/set_defbase -- default database name [DV]
---------------------------------------------

.. function:: get_defbase()

    Get the default database name

    :returns: the current default database name specification
    :rtype: str or None
    :raises TypeError: too many arguments

This method returns the current default database name specification,
or ``None`` if the environment variables should be used. Environment
variables won't be looked up.

.. function:: set_defbase(base)

    Set the default database name

    :param base: the new default base name
    :type base: str or None
    :returns: the previous default database name specification
    :rtype: str or None
    :raises TypeError: bad argument type, or too many arguments

This method sets the default database name value for new connections.
If ``None`` is supplied as parameter, environment variables will be
used in future connections. It returns the previous setting for the
default database name.

get/set_defuser -- default database user [DV]
---------------------------------------------

.. function:: get_defuser()

    Get the default database user

    :returns: the current default database user specification
    :rtype: str or None
    :raises TypeError: too many arguments

This method returns the current default database user specification,
or ``None`` if the environment variables should be used. Environment
variables won't be looked up.

.. function:: set_defuser(user)

    Set the default database user

    :param user: the new default database user
    :type user: str or None
    :returns: the previous default database user specification
    :rtype: str or None
    :raises TypeError: bad argument type, or too many arguments

This method sets the default database user name for new connections.
If ``None`` is supplied as parameter, environment variables will be
used in future connections. It returns the previous setting for the
default user.
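Taken together, the default variables can be used like this (a sketch;
the host and database names are only examples)::

    import pg

    pg.set_defhost('pgserver')  # connect to this host by default
    pg.set_defbase('testdb')    # and open this database by default
    con = pg.connect()          # uses the defaults set above
    pg.set_defhost(None)        # fall back to the environment variables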
get/set_defpasswd -- default database password [DV]
---------------------------------------------------

.. function:: get_defpasswd()

    Get the default database password

    :returns: the current default database password specification
    :rtype: str or None
    :raises TypeError: too many arguments

This method returns the current default database password specification,
or ``None`` if the environment variables should be used. Environment
variables won't be looked up.

.. function:: set_defpasswd(passwd)

    Set the default database password

    :param passwd: the new default database password
    :type passwd: str or None
    :returns: the previous default database password specification
    :rtype: str or None
    :raises TypeError: bad argument type, or too many arguments

This method sets the default database password for new connections. If
``None`` is supplied as parameter, environment variables will be used in
future connections. It returns the previous setting for the default
password.

escape_string -- escape a string for use within SQL
---------------------------------------------------

.. function:: escape_string(string)

    Escape a string for use within SQL

    :param str string: the string that is to be escaped
    :returns: the escaped string
    :rtype: str
    :raises TypeError: bad argument type, or too many arguments

This function escapes a string for use within an SQL command. This is
useful when inserting data values as literal constants in SQL commands.
Certain characters (such as quotes and backslashes) must be escaped to
prevent them from being interpreted specially by the SQL parser.
:func:`escape_string` performs this operation.

Note that there is also a :class:`Connection` method with the same name
which takes connection properties into account.

.. note::

    It is especially important to do proper escaping when handling strings
    that were received from an untrustworthy source. Otherwise there is a
    security risk: you are vulnerable to "SQL injection" attacks wherein
    unwanted SQL commands are fed to your database.

Example::

    name = input("Name? ")
    phone = con.query("select phone from employees where name='%s'"
        % escape_string(name)).getresult()

escape_bytea -- escape binary data for use within SQL
-----------------------------------------------------

.. function:: escape_bytea(datastring)

    Escape binary data for use within SQL as type ``bytea``

    :param str datastring: string containing the binary data
        that is to be escaped
    :returns: the escaped string
    :rtype: str
    :raises TypeError: bad argument type, or too many arguments

Escapes binary data for use within an SQL command with the type ``bytea``.
As with :func:`escape_string`, this is only used when inserting data
directly into an SQL command string.

Note that there is also a :class:`Connection` method with the same name
which takes connection properties into account.

Example::

    picture = open('garfield.gif', 'rb').read()
    con.query("update pictures set img='%s' where name='Garfield'"
        % escape_bytea(picture))
unescape_bytea -- unescape data that has been retrieved as text
---------------------------------------------------------------

.. function:: unescape_bytea(string)

    Unescape ``bytea`` data that has been retrieved as text

    :param str string: the ``bytea`` data string that has been
        retrieved as text
    :returns: byte string containing the binary data
    :rtype: bytes
    :raises TypeError: bad argument type, or too many arguments

Converts an escaped string representation of binary data stored as
``bytea`` into the raw byte string representing the binary data -- this
is the reverse of :func:`escape_bytea`. Since the :class:`Query` results
will already return unescaped byte strings, you normally don't have to
use this method.

Note that there is also a :class:`DB` method with the same name
which does exactly the same.

get/set_decimal -- decimal type to be used for numeric values
-------------------------------------------------------------

.. function:: get_decimal()

    Get the decimal type to be used for numeric values

    :returns: the Python class used for PostgreSQL numeric values
    :rtype: class

This function returns the Python class that is used by PyGreSQL to hold
PostgreSQL numeric values. The default class is :class:`decimal.Decimal`
if available, otherwise the :class:`float` type is used.

.. function:: set_decimal(cls)

    Set a decimal type to be used for numeric values

    :param class cls: the Python class to be used for PostgreSQL
        numeric values

This function can be used to specify the Python class that shall be used
by PyGreSQL to hold PostgreSQL numeric values. The default class is
:class:`decimal.Decimal` if available, otherwise the :class:`float` type
is used.

get/set_decimal_point -- decimal mark used for monetary values
--------------------------------------------------------------

.. function:: get_decimal_point()

    Get the decimal mark used for monetary values

    :returns: string with one character representing the decimal mark
    :rtype: str

This function returns the decimal mark used by PyGreSQL to interpret
PostgreSQL monetary values when converting them to decimal numbers.
The default setting is ``'.'`` as a decimal point. This setting is not
adapted automatically to the locale used by PostgreSQL, but you can use
:func:`set_decimal_point` to set a different decimal mark manually.
A return value of ``None`` means monetary values are not interpreted as
decimal numbers, but returned as strings including the formatting and
currency.

.. versionadded:: 4.1.1

.. function:: set_decimal_point(string)

    Specify which decimal mark is used for interpreting monetary values

    :param str string: string with one character representing the
        decimal mark

This function can be used to specify the decimal mark used by PyGreSQL
to interpret PostgreSQL monetary values. The default value is ``'.'`` as
a decimal point. This value is not adapted automatically to the locale
used by PostgreSQL, so if you are dealing with a database set to a locale
that uses a ``','`` instead of ``'.'`` as the decimal point, then you need
to call ``set_decimal_point(',')`` to have PyGreSQL interpret monetary
values correctly. If you don't want money values to be converted to
decimal numbers, then you can call ``set_decimal_point(None)``, which will
cause PyGreSQL to return monetary values as strings including their
formatting and currency.

.. versionadded:: 4.1.1
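As a brief sketch of how these settings interact, assuming a database
whose locale formats money with a comma::

    import pg

    # If the database locale uses a comma as the decimal mark:
    pg.set_decimal_point(',')

    # Return monetary values unparsed, with formatting and currency:
    pg.set_decimal_point(None)

    # Restore the default decimal point:
    pg.set_decimal_point('.')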
get/set_bool -- whether boolean values are returned as bool objects
-------------------------------------------------------------------

.. function:: get_bool()

    Check whether boolean values are returned as bool objects

    :returns: whether or not bool objects will be returned
    :rtype: bool

This function checks whether PyGreSQL returns PostgreSQL boolean values
converted to Python bool objects, or as ``'f'`` and ``'t'`` strings which
are the values used internally by PostgreSQL. By default, conversion to
bool objects is activated, but you can disable this with the
:func:`set_bool` function.

.. versionadded:: 4.2

.. function:: set_bool(on)

    Set whether boolean values are returned as bool objects

    :param on: whether or not bool objects shall be returned

This function can be used to specify whether PyGreSQL shall return
PostgreSQL boolean values converted to Python bool objects, or as ``'f'``
and ``'t'`` strings which are the values used internally by PostgreSQL.
By default, conversion to bool objects is activated, but you can disable
this by calling ``set_bool(False)``.

.. versionadded:: 4.2

.. versionchanged:: 5.0
    Boolean values were returned as strings by default in earlier versions.

get/set_array -- whether arrays are returned as list objects
------------------------------------------------------------

.. function:: get_array()

    Check whether arrays are returned as list objects

    :returns: whether or not list objects will be returned
    :rtype: bool

This function checks whether PyGreSQL returns PostgreSQL arrays converted
to Python list objects, or simply as text in the internal special output
syntax of PostgreSQL. By default, conversion to list objects is activated,
but you can disable this with the :func:`set_array` function.

.. versionadded:: 5.0

.. function:: set_array(on)

    Set whether arrays are returned as list objects

    :param on: whether or not list objects shall be returned

This function can be used to specify whether PyGreSQL shall return
PostgreSQL arrays converted to Python list objects, or simply as text in
the internal special output syntax of PostgreSQL. By default, conversion
to list objects is activated, but you can disable this by calling
``set_array(False)``.

.. versionadded:: 5.0

.. versionchanged:: 5.0
    Arrays were always returned as text strings in earlier versions.

get/set_bytea_escaped -- whether bytea data is returned escaped
---------------------------------------------------------------

.. function:: get_bytea_escaped()

    Check whether bytea values are returned as escaped strings

    :returns: whether or not bytea objects will be returned escaped
    :rtype: bool

This function checks whether PyGreSQL returns PostgreSQL ``bytea`` values
in escaped form or in unescaped form as byte strings. By default, bytea
values will be returned unescaped as byte strings, but you can change this
with the :func:`set_bytea_escaped` function.

.. versionadded:: 5.0

.. function:: set_bytea_escaped(on)

    Set whether bytea values are returned as escaped strings

    :param on: whether or not bytea objects shall be returned escaped

This function can be used to specify whether PyGreSQL shall return
PostgreSQL ``bytea`` values in escaped form or in unescaped form as byte
strings. By default, bytea values will be returned unescaped as byte
strings, but you can change this by calling ``set_bytea_escaped(True)``.

.. versionadded:: 5.0

.. versionchanged:: 5.0
    Bytea data was returned in escaped form by default in earlier versions.
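The effect of the :func:`set_bool` and :func:`set_array` switches described
above can be illustrated with a minimal sketch; the database name is a
placeholder and the printed results are what one would expect, not
verified output (:func:`set_bytea_escaped` works analogously)::

    import pg

    con = pg.connect('testdb')  # placeholder database

    pg.set_bool(False)   # booleans come back as 't'/'f' strings
    print(con.query('SELECT true').getresult())        # e.g. [('t',)]
    pg.set_bool(True)    # restore the default conversion
    print(con.query('SELECT true').getresult())        # e.g. [(True,)]

    pg.set_array(False)  # arrays come back in PostgreSQL text syntax
    print(con.query('SELECT ARRAY[1,2]').getresult())  # e.g. [('{1,2}',)]
    pg.set_array(True)   # restore the default conversion to lists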
get/set_jsondecode -- decoding JSON format
------------------------------------------

.. function:: get_jsondecode()

    Get the function that deserializes JSON formatted strings

This returns the function used by PyGreSQL to construct Python objects
from JSON formatted strings.

.. function:: set_jsondecode(func)

    Set a function that will deserialize JSON formatted strings

    :param func: the function to be used for deserializing JSON strings

You can use this if you do not want to deserialize JSON strings coming in
from the database, or if you want to use a different function than the
standard function :func:`json.loads`, or if you want to use it with
parameters different from the default ones. If you set this function to
*None*, then the automatic deserialization of JSON strings will be
deactivated.

.. versionadded:: 5.0

.. versionchanged:: 5.0
    JSON data was always returned as text strings in earlier versions.

get/set_datestyle -- assume a fixed date style
----------------------------------------------

.. function:: get_datestyle()

    Get the assumed date style for typecasting

This returns the PostgreSQL date style that is silently assumed when
typecasting dates, or *None* if no fixed date style is assumed, in which
case the date style is requested from the database when necessary (this
is the default). Note that this method will *not* get the date style that
is currently set in the session or in the database. You can get the
current setting with the methods :meth:`DB.get_parameter` and
:meth:`Connection.parameter`. You can also get the date format
corresponding to the current date style by calling
:meth:`Connection.date_format`.

.. versionadded:: 5.0

.. function:: set_datestyle(datestyle)

    Set a fixed date style that shall be assumed when typecasting

    :param str datestyle: the date style that shall be assumed,
        or *None* if no fixed date style shall be assumed

PyGreSQL is able to automatically pick up the right date style for
typecasting date values from the database, even if you change it for the
current session with a ``SET DateStyle`` command. This happens very
efficiently, without an additional database request being involved. If you
still want to have PyGreSQL always assume a fixed date style instead, then
you can set one with this function. Note that calling this function will
*not* alter the date style of the database or the current session. You can
do that by calling the method :meth:`DB.set_parameter` instead.

.. versionadded:: 5.0
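Returning to :func:`set_jsondecode` above, here is a minimal sketch of
the three possible uses; parsing JSON numbers as :class:`decimal.Decimal`
is just an arbitrary example of a customized decoder::

    import json
    from decimal import Decimal

    import pg

    # Use a customized decoder, e.g. parsing JSON numbers as Decimal:
    pg.set_jsondecode(lambda s: json.loads(s, parse_float=Decimal))

    # Or switch off deserialization completely and get raw JSON text:
    pg.set_jsondecode(None)

    # Restore the standard behavior:
    pg.set_jsondecode(json.loads)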
get/set_typecast -- custom typecasting
--------------------------------------

PyGreSQL uses typecast functions to cast the raw data coming from the
database to Python objects suitable for the particular database type.
These functions take a single string argument that represents the data
to be cast and must return the cast value.

PyGreSQL provides through its C extension module basic typecast functions
for the common database types, but if you want to add more typecast
functions, you can set these using the following functions.

.. method:: get_typecast(typ)

    Get the global cast function for the given database type

    :param str typ: PostgreSQL type name
    :returns: the typecast function for the specified type
    :rtype: function or None

.. versionadded:: 5.0

.. method:: set_typecast(typ, cast)

    Set a global typecast function for the given database type(s)

    :param typ: PostgreSQL type name or list of type names
    :type typ: str or list
    :param cast: the typecast function to be set for the specified type(s)
    :type cast: callable or None

The typecast function must take one string object as argument and return
a Python object into which the PostgreSQL type shall be cast.

If the function takes another parameter named *connection*, then the
current database connection will also be passed to the typecast function.
This may sometimes be necessary to look up certain database settings.

.. versionadded:: 5.0

Note that database connections cache types and their cast functions using
connection specific :class:`DbTypes` objects. You can also get, set and
reset typecast functions on the connection level using the methods
:meth:`DbTypes.get_typecast`, :meth:`DbTypes.set_typecast` and
:meth:`DbTypes.reset_typecast` of the :attr:`DB.dbtypes` object. This will
not affect other connections or future connections. In order to be sure
a global change is picked up by a running connection, you must reopen it
or call :meth:`DbTypes.reset_typecast` on the :attr:`DB.dbtypes` object.

Also note that the typecasting for all of the basic types happens already
in the C extension module. The typecast functions that can be set with the
above methods are only called for the types that are not already supported
by the C extension module.

cast_array/record -- fast parsers for arrays and records
--------------------------------------------------------

PostgreSQL returns arrays and records (composite types) using a special
output syntax with several quirks that cannot easily and quickly be parsed
in Python. Therefore the C extension module provides two fast parsers that
allow quickly turning these text representations into Python objects:
Arrays will be converted to Python lists, and records to Python tuples.
These fast parsers are used automatically by PyGreSQL in order to return
arrays and records from database queries as lists and tuples, so you
normally don't need to call them directly. You may only need them for
typecasting arrays of data types that are not supported by default in
PostgreSQL.

.. function:: cast_array(string, [cast], [delim])

    Cast a string representing a PostgreSQL array to a Python list

    :param str string: the string with the text representation of the array
    :param cast: a typecast function for the elements of the array
    :type cast: callable or None
    :param delim: delimiter character between adjacent elements
    :type delim: byte string with a single character
    :returns: a list representing the PostgreSQL array in Python
    :rtype: list
    :raises TypeError: invalid argument types
    :raises ValueError: error in the syntax of the given array

This function takes a *string* containing the text representation of a
PostgreSQL array (which may look like ``'{{1,2},{3,4}}'`` for a
two-dimensional array), a typecast function *cast* that is called for
every element, and an optional delimiter character *delim* (usually a
comma), and returns a Python list representing the array (which may be
nested like ``[[1, 2], [3, 4]]`` in this example). The cast function must
take a single argument which will be the text representation of the
element and must output the corresponding Python object that shall be put
into the list. If you don't pass a cast function or set it to *None*, then
unprocessed text strings will be returned as elements of the array. If you
don't pass a delimiter character, then a comma will be used by default.

.. versionadded:: 5.0
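As a minimal sketch of what this parser does (the values here are
arbitrary examples)::

    >>> import pg
    >>> pg.cast_array('{{1,2},{3,4}}', int)
    [[1, 2], [3, 4]]
    >>> pg.cast_array('{fuzzy dice,42}')  # no cast function: plain strings
    ['fuzzy dice', '42']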
.. function:: cast_record(string, [cast], [delim])

    Cast a string representing a PostgreSQL record to a Python tuple

    :param str string: the string with the text representation of the record
    :param cast: typecast function(s) for the elements of the record
    :type cast: callable, list or tuple of callables, or None
    :param delim: delimiter character between adjacent elements
    :type delim: byte string with a single character
    :returns: a tuple representing the PostgreSQL record in Python
    :rtype: tuple
    :raises TypeError: invalid argument types
    :raises ValueError: error in the syntax of the given record

This function takes a *string* containing the text representation of a
PostgreSQL record (which may look like ``'(1,a,2,b)'`` for a record
composed of four fields), a typecast function *cast* that is called for
every element, or a list or tuple of such functions corresponding to the
individual fields of the record, and an optional delimiter character
*delim* (usually a comma), and returns a Python tuple representing the
record (which may be inhomogeneous like ``(1, 'a', 2, 'b')`` in this
example). The cast function(s) must take a single argument which will be
the text representation of the element and must output the corresponding
Python object that shall be put into the tuple. If you don't pass cast
function(s) or pass *None* instead, then unprocessed text strings will be
returned as elements of the tuple. If you don't pass a delimiter character,
then a comma will be used by default.

.. versionadded:: 5.0

Note that besides using parentheses instead of braces, there are other
subtle differences in escaping special characters and NULL values between
the syntax used for arrays and the one used for composite types, which
these functions take into account.

Type helpers
------------

The module provides the following type helper functions. You can wrap
parameters with these functions when passing them to :meth:`DB.query`
or :meth:`DB.query_formatted` in order to give PyGreSQL a hint about the
type of the parameters, if it cannot be derived from the context.

.. function:: Bytea(bytes)

    A wrapper for holding a bytea value

.. versionadded:: 5.0

.. function:: HStore(dict)

    A wrapper for holding an hstore dictionary

.. versionadded:: 5.0

.. function:: Json(obj)

    A wrapper for holding an object serializable to JSON

.. versionadded:: 5.0

The following additional type helper is only meaningful when used with
:meth:`DB.query_formatted`. It marks a parameter as text that shall be
literally included into the SQL. This is useful for passing table names
for instance.

.. function:: Literal(sql)

    A wrapper for holding a literal SQL string

.. versionadded:: 5.0
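The following minimal sketch shows how two of these wrappers could be
combined with :meth:`DB.query_formatted`; the table ``persons`` and its
columns are made-up examples::

    import pg

    db = pg.DB(dbname='testdb')  # placeholder database

    # Literal inlines the table name verbatim, Json marks the dict as JSON:
    db.query_formatted(
        "INSERT INTO %s (name, data) VALUES (%s, %s)",
        (pg.Literal('persons'), 'HAL', pg.Json({'age': 42})))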
Module constants
----------------

Some constants are defined in the module dictionary. They are intended to
be used as parameters for method calls. You should refer to the libpq
description in the PostgreSQL user manual for more information about them.
These constants are:

.. data:: version
.. data:: __version__

    constants that give the current version

.. data:: INV_READ
.. data:: INV_WRITE

    large objects access modes,
    used by :meth:`Connection.locreate` and :meth:`LargeObject.open`

.. data:: SEEK_SET
.. data:: SEEK_CUR
.. data:: SEEK_END

    positional flags, used by :meth:`LargeObject.seek`

.. data:: TRANS_IDLE
.. data:: TRANS_ACTIVE
.. data:: TRANS_INTRANS
.. data:: TRANS_INERROR
.. data:: TRANS_UNKNOWN

    transaction states, used by :meth:`Connection.transaction`

The Notification Handler
========================

.. py:currentmodule:: pg

PyGreSQL comes with a client-side asynchronous notification handler that
was based on the ``pgnotify`` module written by Ng Pheng Siong.

.. versionadded:: 4.1.1

Instantiating the notification handler
--------------------------------------

.. class:: NotificationHandler(db, event, callback, [arg_dict], [timeout], [stop_event])

    Create an instance of the notification handler

    :param db: the database connection
    :type db: :class:`Connection`
    :param str event: the name of an event to listen for
    :param callback: a callback function
    :param dict arg_dict: an optional dictionary for passing arguments
    :param timeout: the time-out when waiting for notifications
    :type timeout: int, float or None
    :param str stop_event: an optional different name to be used as
        stop event

You can also create an instance of the NotificationHandler using the
:meth:`DB.notification_handler` method. In this case you don't need to
pass a database connection because the :class:`DB` connection itself will
be used as the database connection for the notification handler.

You must always pass the name of an *event* (notification channel) to
listen for and a *callback* function.

You can also specify a dictionary *arg_dict* that will be passed as the
single argument to the callback function, and a *timeout* value in seconds
(a floating point number denotes fractions of seconds). If it is absent or
*None*, the handler will never time out. If the time-out is reached, the
callback function will be called with a single argument that is *None*.
If you set the *timeout* to ``0``, the handler will poll notifications
synchronously and return.

You can specify the name of the event that will be used to signal the
handler to stop listening as *stop_event*. By default, it will be the
event name prefixed with ``'stop_'``.

All of the parameters will also be available as attributes of the created
notification handler object.

Invoking the notification handler
---------------------------------

To invoke the notification handler, just call the instance without passing
any parameters.

The handler is a loop that listens for notifications on the event and stop
event channels. When either of these notifications is received, its
associated *pid*, *event* and *extra* (the payload passed with the
notification) are inserted into its *arg_dict* dictionary and the callback
is invoked with this dictionary as a single argument. When the handler
receives a stop event, it stops listening to both events and returns.

In the special case that the timeout of the handler has been set to ``0``,
the handler will poll all events synchronously and return; otherwise it
will keep listening until it receives a stop event.

.. warning::

    If you run this loop in another thread, don't use the same database
    connection for database operations in the main thread.
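As a brief illustration, here is a minimal sketch of how such a handler
could be set up and run in a background thread; the channel name ``'jobs'``
and the database name are only placeholders::

    import threading

    import pg

    db = pg.DB(dbname='testdb')  # placeholder database

    def on_notification(arg_dict):
        if arg_dict is None:  # the timeout was reached
            print('no notification received')
        else:  # arg_dict contains 'pid', 'event' and 'extra' (the payload)
            print('received:', arg_dict)

    # 'jobs' is a made-up channel name; the stop channel is 'stop_jobs'
    handler = db.notification_handler('jobs', on_notification, timeout=30)

    # The handler is a blocking loop, so we run it in its own thread.
    # Per the warning above, don't reuse db in the main thread meanwhile.
    thread = threading.Thread(target=handler)
    thread.start()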
Sending notifications
---------------------

You can send notifications by either running ``NOTIFY`` commands on the
database directly, or using the following method:

.. method:: NotificationHandler.notify([db], [stop], [payload])

    Generate a notification

    :param db: the database connection for sending the notification
    :type db: :class:`Connection`
    :param bool stop: whether to produce a normal event or a stop event
    :param str payload: an optional payload to be sent with the notification

This method sends a notification event together with an optional *payload*.
If you set the *stop* flag, a stop notification will be sent instead of a
normal notification. This will cause the handler to stop listening.

.. warning::

    If the notification handler is running in another thread, you must pass
    a different database connection since PyGreSQL database connections are
    not thread-safe.

Auxiliary methods
-----------------

.. method:: NotificationHandler.listen()

    Start listening for the event and the stop event

This method is called implicitly when the handler is invoked.

.. method:: NotificationHandler.unlisten()

    Stop listening for the event and the stop event

This method is called implicitly when the handler receives a stop event
or when it is closed or deleted.

.. method:: NotificationHandler.close()

    Stop listening and close the database connection

You can call this method instead of :meth:`NotificationHandler.unlisten`
if you want to close not only the handler, but also the database connection
it was created with.
Query methods
=============

.. py:currentmodule:: pg

.. class:: Query

The :class:`Query` object returned by :meth:`Connection.query` and
:meth:`DB.query` can be used as an iterable returning rows as tuples.
You can also directly access row tuples using their index, and get
the number of rows with the :func:`len` function.

The :class:`Query` class also provides the following methods for accessing
the results of the query:

getresult -- get query values as list of tuples
-----------------------------------------------

.. method:: Query.getresult()

    Get query values as list of tuples

    :returns: result values as a list of tuples
    :rtype: list
    :raises TypeError: too many (any) parameters
    :raises MemoryError: internal memory error

This method returns query results as a list of tuples. More information
about this result may be accessed using the :meth:`Query.listfields`,
:meth:`Query.fieldname` and :meth:`Query.fieldnum` methods.

Note that since PyGreSQL 5.0 this method will return the values of array
type columns as Python lists.

Since PyGreSQL 5.1 the :class:`Query` can also be used directly as an
iterable sequence, i.e. you can iterate over the :class:`Query` object to
get the same tuples as returned by :meth:`Query.getresult`. This is
slightly more efficient than getting the full list of results, but note
that the full result is always fetched from the server anyway when the
query is executed.

You can also call :func:`len` on a query to find the number of rows in the
result, and access row tuples using their index directly on the
:class:`Query` object.

dictresult/dictiter -- get query values as dictionaries
-------------------------------------------------------

.. method:: Query.dictresult()

    Get query values as list of dictionaries

    :returns: result values as a list of dictionaries
    :rtype: list
    :raises TypeError: too many (any) parameters
    :raises MemoryError: internal memory error

This method returns query results as a list of dictionaries which have
the field names as keys.

If the query has duplicate field names, you will get the value for the
field with the highest index in the query.

Note that since PyGreSQL 5.0 this method will return the values of array
type columns as Python lists.

.. method:: Query.dictiter()

    Get query values as iterable of dictionaries

    :returns: result values as an iterable of dictionaries
    :rtype: iterable
    :raises TypeError: too many (any) parameters
    :raises MemoryError: internal memory error

This method returns query results as an iterable of dictionaries which
have the field names as keys. This is slightly more efficient than getting
the full list of results as dictionaries, but note that the full result is
always fetched from the server anyway when the query is executed.

If the query has duplicate field names, you will get the value for the
field with the highest index in the query.

.. versionadded:: 5.1

namedresult/namediter -- get query values as named tuples
---------------------------------------------------------

.. method:: Query.namedresult()

    Get query values as list of named tuples

    :returns: result values as a list of named tuples
    :rtype: list
    :raises TypeError: too many (any) parameters
    :raises TypeError: named tuples not supported
    :raises MemoryError: internal memory error

This method returns query results as a list of named tuples with proper
field names.

Column names in the database that are not valid as field names for named
tuples (particularly, names starting with an underscore) are automatically
renamed to valid positional names.

Note that since PyGreSQL 5.0 this method will return the values of array
type columns as Python lists.

.. versionadded:: 4.1

.. method:: Query.namediter()

    Get query values as iterable of named tuples

    :returns: result values as an iterable of named tuples
    :rtype: iterable
    :raises TypeError: too many (any) parameters
    :raises TypeError: named tuples not supported
    :raises MemoryError: internal memory error

This method returns query results as an iterable of named tuples with
proper field names. This is slightly more efficient than getting the full
list of results as named tuples, but note that the full result is always
fetched from the server anyway when the query is executed.

Column names in the database that are not valid as field names for named
tuples (particularly, names starting with an underscore) are automatically
renamed to valid positional names.

.. versionadded:: 5.1
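To illustrate the different result formats, here is a small doctest-style
sketch; it assumes ``db`` is an open :class:`DB` connection::

    >>> q = db.query('SELECT 1 AS x, 2 AS y')
    >>> q.getresult()
    [(1, 2)]
    >>> q.dictresult()
    [{'x': 1, 'y': 2}]
    >>> q.namedresult()[0].y
    2
    >>> [row for row in q]  # the Query itself is iterable
    [(1, 2)]
    >>> len(q)
    1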
scalarresult/scalariter -- get query values as scalars
------------------------------------------------------

.. method:: Query.scalarresult()

    Get first fields from query result as list of scalar values

    :returns: first fields from result as a list of scalar values
    :rtype: list
    :raises TypeError: too many (any) parameters
    :raises MemoryError: internal memory error

This method returns the first fields from the query results as a list of
scalar values in the order returned by the server.

.. versionadded:: 5.1

.. method:: Query.scalariter()

    Get first fields from query result as iterable of scalar values

    :returns: first fields from result as an iterable of scalar values
    :rtype: iterable
    :raises TypeError: too many (any) parameters
    :raises MemoryError: internal memory error

This method returns the first fields from the query results as an iterable
of scalar values in the order returned by the server. This is slightly more
efficient than getting the full list of results as rows or scalar values,
but note that the full result is always fetched from the server anyway when
the query is executed.

.. versionadded:: 5.1

one/onedict/onenamed/onescalar -- get one result of a query
-----------------------------------------------------------

.. method:: Query.one()

    Get one row from the result of a query as a tuple

    :returns: next row from the query results as a tuple of fields
    :rtype: tuple or None
    :raises TypeError: too many (any) parameters
    :raises MemoryError: internal memory error

Returns only one row from the result as a tuple of fields.

This method can be called multiple times to return more rows.
It returns ``None`` if the result does not contain one more row.

.. versionadded:: 5.1

.. method:: Query.onedict()

    Get one row from the result of a query as a dictionary

    :returns: next row from the query results as a dictionary
    :rtype: dict or None
    :raises TypeError: too many (any) parameters
    :raises MemoryError: internal memory error

Returns only one row from the result as a dictionary with the field names
used as the keys.

This method can be called multiple times to return more rows.
It returns ``None`` if the result does not contain one more row.

.. versionadded:: 5.1

.. method:: Query.onenamed()

    Get one row from the result of a query as named tuple

    :returns: next row from the query results as a named tuple
    :rtype: named tuple or None
    :raises TypeError: too many (any) parameters
    :raises MemoryError: internal memory error

Returns only one row from the result as a named tuple with proper field
names.

Column names in the database that are not valid as field names for named
tuples (particularly, names starting with an underscore) are automatically
renamed to valid positional names.

This method can be called multiple times to return more rows.
It returns ``None`` if the result does not contain one more row.

.. versionadded:: 5.1

.. method:: Query.onescalar()

    Get one row from the result of a query as scalar value

    :returns: next row from the query results as a scalar value
    :rtype: type of first field or None
    :raises TypeError: too many (any) parameters
    :raises MemoryError: internal memory error

Returns the first field of the next row from the result as a scalar value.

This method can be called multiple times to return more rows as scalars.
It returns ``None`` if the result does not contain one more row.

.. versionadded:: 5.1
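The difference between the ``one*`` methods above and the ``single*``
methods described next is easy to see in a quick sketch, again assuming
an open :class:`DB` connection ``db``::

    >>> q = db.query('SELECT generate_series(1, 3)')
    >>> q.onescalar()
    1
    >>> q.one()
    (2,)
    >>> q.onescalar()
    3
    >>> q.one() is None  # the result set is now exhausted
    True
    >>> db.query('SELECT 42').singlescalar()  # exactly one row required
    42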
single/singledict/singlenamed/singlescalar -- get single result of a query
--------------------------------------------------------------------------

.. method:: Query.single()

    Get single row from the result of a query as a tuple

    :returns: single row from the query results as a tuple of fields
    :rtype: tuple
    :raises InvalidResultError: result does not have exactly one row
    :raises TypeError: too many (any) parameters
    :raises MemoryError: internal memory error

Returns a single row from the result as a tuple of fields.

This method returns the same single row when called multiple times.
It raises an :exc:`pg.InvalidResultError` if the result does not have
exactly one row. More specifically, this will be of type
:exc:`pg.NoResultError` if it is empty and of type
:exc:`pg.MultipleResultsError` if it has multiple rows.

.. versionadded:: 5.1

.. method:: Query.singledict()

    Get single row from the result of a query as a dictionary

    :returns: single row from the query results as a dictionary
    :rtype: dict
    :raises InvalidResultError: result does not have exactly one row
    :raises TypeError: too many (any) parameters
    :raises MemoryError: internal memory error

Returns a single row from the result as a dictionary with the field names
used as the keys.

This method returns the same single row when called multiple times.
It raises an :exc:`pg.InvalidResultError` if the result does not have
exactly one row. More specifically, this will be of type
:exc:`pg.NoResultError` if it is empty and of type
:exc:`pg.MultipleResultsError` if it has multiple rows.

.. versionadded:: 5.1

.. method:: Query.singlenamed()

    Get single row from the result of a query as named tuple

    :returns: single row from the query results as a named tuple
    :rtype: named tuple
    :raises InvalidResultError: result does not have exactly one row
    :raises TypeError: too many (any) parameters
    :raises MemoryError: internal memory error

Returns a single row from the result as a named tuple with proper field
names.

Column names in the database that are not valid as field names for named
tuples (particularly, names starting with an underscore) are automatically
renamed to valid positional names.

This method returns the same single row when called multiple times.
It raises an :exc:`pg.InvalidResultError` if the result does not have
exactly one row. More specifically, this will be of type
:exc:`pg.NoResultError` if it is empty and of type
:exc:`pg.MultipleResultsError` if it has multiple rows.

.. versionadded:: 5.1

.. method:: Query.singlescalar()

    Get single row from the result of a query as scalar value

    :returns: single row from the query results as a scalar value
    :rtype: type of first field
    :raises InvalidResultError: result does not have exactly one row
    :raises TypeError: too many (any) parameters
    :raises MemoryError: internal memory error

Returns the first field of a single row from the result as a scalar value.

This method returns the same single row as scalar when called multiple
times. It raises an :exc:`pg.InvalidResultError` if the result does not
have exactly one row. More specifically, this will be of type
:exc:`pg.NoResultError` if it is empty and of type
:exc:`pg.MultipleResultsError` if it has multiple rows.

.. versionadded:: 5.1

listfields -- list field names of previous query result
--------------------------------------------------------

.. method:: Query.listfields()

    List field names of previous query result

    :returns: field names
    :rtype: list
    :raises TypeError: too many parameters

This method returns the list of field names defined for the query result.
The fields are in the same order as the result values.

fieldname, fieldnum -- field name/number conversion
---------------------------------------------------

.. method:: Query.fieldname(num)

    Get field name from its number

    :param int num: field number
    :returns: field name
    :rtype: str
    :raises TypeError: invalid connection, bad parameter type,
        or too many parameters
    :raises ValueError: invalid field number

This method lets you find a field name from its rank number. It can be
useful for displaying a result. The fields are in the same order as the
result values.

.. method:: Query.fieldnum(name)

    Get field number from its name

    :param str name: field name
    :returns: field number
    :rtype: int
    :raises TypeError: invalid connection, bad parameter type,
        or too many parameters
    :raises ValueError: unknown field name

This method returns a field number given its name. It can be used to build
a function that converts result list strings to their correct type, using
a hardcoded table definition. The number returned is the field rank in the
query result.
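A short sketch of the field introspection methods, again assuming an open
:class:`DB` connection ``db``::

    >>> q = db.query('SELECT 1 AS x, 2 AS y')
    >>> q.listfields()
    ['x', 'y']
    >>> q.fieldname(1)
    'y'
    >>> q.fieldnum('x')
    0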
ntuples -- return number of tuples in query object
--------------------------------------------------

.. method:: Query.ntuples()

    Return number of tuples in query object

    :returns: number of tuples in :class:`Query`
    :rtype: int
    :raises TypeError: too many arguments

This method returns the number of tuples in the query result.

.. deprecated:: 5.1
    You can use the normal :func:`len` function instead.

Remarks on Adaptation and Typecasting
=====================================

.. py:currentmodule:: pgdb

Both PostgreSQL and Python have the concept of data types, but there are
of course differences between the two type systems. Therefore PyGreSQL
needs to adapt Python objects to the representation required by PostgreSQL
when passing values as query parameters, and it needs to typecast the
representation of PostgreSQL data types returned by database queries to
Python objects. Here are some explanations about how this works in detail
in case you want to better understand or change the default behavior of
PyGreSQL.

Supported data types
--------------------

The following automatic data type conversions are supported by PyGreSQL
out of the box. If you need other automatic type conversions or want to
change the default conversions, you can achieve this by using the methods
explained in the next two sections.

================================== ==================
PostgreSQL                         Python
================================== ==================
char, bpchar, name, text, varchar  str
bool                               bool
bytea                              bytes
int2, int4, int8, oid, serial      int [#int8]_
int2vector                         list of int
float4, float8                     float
numeric, money                     Decimal
date                               datetime.date
time, timetz                       datetime.time
timestamp, timestamptz             datetime.datetime
interval                           datetime.timedelta
hstore                             dict
json, jsonb                        list or dict
uuid                               uuid.UUID
array                              list [#array]_
record                             tuple
================================== ==================

.. note::

    Elements of arrays and records will also be converted accordingly.

.. [#int8] int8 is converted to long in Python 2

.. [#array] The first element of the array will always be the first element
    of the Python list, no matter what the lower bound of the PostgreSQL
    array is. The information about the start index of the array (which is
    usually 1 in PostgreSQL, but can also be different from 1) is ignored
    and gets lost in the conversion to the Python list. If you need that
    information, you can request it separately with the `array_lower()`
    function provided by PostgreSQL.

Adaptation of parameters
------------------------

PyGreSQL knows how to adapt the common Python types to get a suitable
representation of their values for PostgreSQL when you pass parameters
to a query. For example::

    >>> con = pgdb.connect(...)
    >>> cur = con.cursor()
    >>> parameters = (144, 3.75, 'hello', None)
    >>> tuple(cur.execute('SELECT %s, %s, %s, %s', parameters).fetchone())
    (144, Decimal('3.75'), 'hello', None)

This is the result we can expect, so obviously PyGreSQL has adapted the
parameters and sent the following query to PostgreSQL:

.. code-block:: sql

    SELECT 144, 3.75, 'hello', NULL

Note the subtle, but important detail that even though the SQL string
passed to :meth:`cur.execute` contains conversion specifications normally
used in Python with the ``%`` operator for formatting strings, we didn't
use the ``%`` operator to format the parameters, but passed them as the
second argument to :meth:`cur.execute`.
I.e. we **didn't** write the following::

    >>> tuple(cur.execute('SELECT %s, %s, %s, %s' % parameters).fetchone())

If we had done this, PostgreSQL would have complained because the
parameters were not adapted. Particularly, there would be no quotes around
the value ``'hello'``, so PostgreSQL would have interpreted this as a
database column, which would have caused a :exc:`ProgrammingError`. Also,
the Python value ``None`` would have been included in the SQL command
literally, instead of being converted to the SQL keyword ``NULL``, which
would have been another reason for PostgreSQL to complain about our bad
query:

.. code-block:: sql

    SELECT 144, 3.75, hello, None

Even worse, building queries with the use of the ``%`` operator makes us
vulnerable to so called "SQL injection" exploits, where an attacker inserts
malicious SQL statements into our queries that we never intended to be
executed. We could avoid this by carefully quoting and escaping the
parameters, but this would be tedious, and if we overlook something, our
code will still be vulnerable. So please don't do this. This cannot be
emphasized enough, because it is such a subtle difference and using the
``%`` operator looks so natural:

.. warning::

    Remember to **never** insert parameters directly into your queries
    using the ``%`` operator. Always pass the parameters separately.

The good thing is that by letting PyGreSQL do the work for you, you can
treat all your parameters equally and don't need to ponder where you need
to put quotes or need to escape strings. You can and should also always use
the general ``%s`` specification instead of e.g. using ``%d`` for integers.
Actually, to avoid mistakes and make it easier to insert parameters at more
than one location, you can and should use named specifications, like this::

    >>> params = dict(greeting='Hello', name='HAL')
    >>> sql = """SELECT %(greeting)s || ', ' || %(name)s
    ...     || '. Do you read me, ' || %(name)s || '?'"""
    >>> cur.execute(sql, params).fetchone()[0]
    'Hello, HAL. Do you read me, HAL?'

PyGreSQL does not only adapt the basic types like ``int``, ``float``,
``bool`` and ``str``, but also tries to make sense of Python lists and
tuples.

Lists are adapted as PostgreSQL arrays::

    >>> params = dict(array=[[1, 2], [3, 4]])
    >>> cur.execute("SELECT %(array)s", params).fetchone()[0]
    [[1, 2], [3, 4]]

Note that the query gives the value back as Python lists again. This is
achieved by the typecasting mechanism explained in the next section.
The query that was actually executed was this:

.. code-block:: sql

    SELECT ARRAY[[1,2],[3,4]]

Again, if we had inserted the list using the ``%`` operator without
adaptation, the ``ARRAY`` keyword would have been missing in the query.

Tuples are adapted as PostgreSQL composite types::

    >>> params = dict(record=('Bond', 'James'))
    >>> cur.execute("SELECT %(record)s", params).fetchone()[0]
    ('Bond', 'James')

You can also use this feature with the ``IN`` syntax of SQL::

    >>> params = dict(what='needle', where=('needle', 'haystack'))
    >>> cur.execute("SELECT %(what)s IN %(where)s", params).fetchone()[0]
    True
Sometimes a Python type can be ambiguous. For instance, you might want to
insert a Python list not into an array column, but into a JSON column.
Or you want to interpret a string as a date and insert it into a DATE
column. In this case you can give PyGreSQL a hint by using
:ref:`type_constructors`::

    >>> cur.execute("CREATE TABLE json_data (data json, created date)")
    >>> params = dict(
    ...     data=pgdb.Json([1, 2, 3]), created=pgdb.Date(2016, 1, 29))
    >>> sql = ("INSERT INTO json_data VALUES (%(data)s, %(created)s)")
    >>> cur.execute(sql, params)
    >>> cur.execute("SELECT * FROM json_data").fetchone()
    Row(data=[1, 2, 3], created='2016-01-29')

Let's think of another example where we create a table with a composite
type in PostgreSQL:

.. code-block:: sql

    CREATE TABLE on_hand (
        item  inventory_item,
        count integer)

We assume the composite type ``inventory_item`` has been created like this:

.. code-block:: sql

    CREATE TYPE inventory_item AS (
        name        text,
        supplier_id integer,
        price       numeric)

In Python we can use a named tuple as an equivalent to this PostgreSQL
type::

    >>> from collections import namedtuple
    >>> inventory_item = namedtuple(
    ...     'inventory_item', ['name', 'supplier_id', 'price'])

Using the automatic adaptation of Python tuples, an item can now be
inserted into the database and then read back as follows::

    >>> cur.execute("INSERT INTO on_hand VALUES (%(item)s, %(count)s)",
    ...     dict(item=inventory_item('fuzzy dice', 42, 1.99), count=1000))
    >>> cur.execute("SELECT * FROM on_hand").fetchone()
    Row(item=inventory_item(name='fuzzy dice', supplier_id=42,
        price=Decimal('1.99')), count=1000)

However, we may not want to use named tuples, but custom Python classes
to hold our values, like this one::

    >>> class InventoryItem:
    ...
    ...     def __init__(self, name, supplier_id, price):
    ...         self.name = name
    ...         self.supplier_id = supplier_id
    ...         self.price = price
    ...
    ...     def __str__(self):
    ...         return '%s (from %s, at $%s)' % (
    ...             self.name, self.supplier_id, self.price)

But when we try to insert an instance of this class in the same way, we
will get an error::

    >>> cur.execute("INSERT INTO on_hand VALUES (%(item)s, %(count)s)",
    ...     dict(item=InventoryItem('fuzzy dice', 42, 1.99), count=1000))
    InterfaceError: Do not know how to adapt type <class 'InventoryItem'>

While PyGreSQL knows how to adapt tuples, it does not know what to make
out of our custom class. To simply convert the object to a string using
the ``str`` function is not a solution, since this yields a human readable
string that is not useful for PostgreSQL. However, it is possible to make
such custom classes adapt themselves to PostgreSQL by adding a "magic"
method with the name ``__pg_repr__``, like this::

    >>> class InventoryItem:
    ...
    ...     ...
    ...
    ...     def __str__(self):
    ...         return '%s (from %s, at $%s)' % (
    ...             self.name, self.supplier_id, self.price)
    ...
    ...     def __pg_repr__(self):
    ...         return (self.name, self.supplier_id, self.price)

Now you can insert class instances the same way as you insert named
tuples. Note that PyGreSQL adapts the result of ``__pg_repr__`` again if
it is a tuple or a list. Otherwise, it must be a properly escaped string.

Typecasting to Python
---------------------

As you noticed, PyGreSQL automatically converted the PostgreSQL data to
suitable Python objects when returning values via one of the "fetch"
methods of a cursor. This is done by the use of built-in typecast
functions.

If you want to use different typecast functions or add your own if no
built-in typecast function is available, then this is possible using the
:func:`set_typecast` function. With the :func:`get_typecast` function you
can check which function is currently set, and :func:`reset_typecast`
allows you to reset the typecast function to its default. If no typecast
function is set, then PyGreSQL will return the raw strings from the
database.
For instance, you will find that PyGreSQL uses the normal ``int`` function
to cast PostgreSQL ``int4`` type values to Python::

    >>> pgdb.get_typecast('int4')
    int

You can change this to return float values instead::

    >>> pgdb.set_typecast('int4', float)
    >>> con = pgdb.connect(...)
    >>> cur = con.cursor()
    >>> cur.execute('select 42::int4').fetchone()[0]
    42.0

Note that the connections cache the typecast functions, so you may need to
reopen the database connection, or reset the cache of the connection to
make this effective, using the following command::

    >>> con.type_cache.reset_typecast()

The :class:`TypeCache` of the connection can also be used to change
typecast functions locally for one database connection only.

As a more useful example, we can create a typecast function that casts
items of the composite type used as example in the previous section to
instances of the corresponding Python class::

    >>> con.type_cache.reset_typecast()
    >>> cast_tuple = con.type_cache.get_typecast('inventory_item')
    >>> cast_item = lambda value: InventoryItem(*cast_tuple(value))
    >>> con.type_cache.set_typecast('inventory_item', cast_item)
    >>> str(cur.execute("SELECT * FROM on_hand").fetchone()[0])
    'fuzzy dice (from 42, at $1.99)'

As you saw in the last section, PyGreSQL also has a typecast function for
JSON, which is the default JSON decoder from the standard library. Let's
assume we want to use a slight variation of that decoder in which every
integer in JSON is converted to a float in Python. This can be
accomplished as follows::

    >>> from json import loads
    >>> cast_json = lambda v: loads(v, parse_int=float)
    >>> pgdb.set_typecast('json', cast_json)
    >>> cur.execute("SELECT data FROM json_data").fetchone()[0]
    [1.0, 2.0, 3.0]

Note again that you may need to run ``con.type_cache.reset_typecast()``
to make this effective. Also note that the two types ``json`` and
``jsonb`` have their own typecast functions, so if you use ``jsonb``
instead of ``json``, you need to use this type name when setting the
typecast function::

    >>> pgdb.set_typecast('jsonb', cast_json)

As one last example, let us try to typecast the geometric data type
``circle`` of PostgreSQL into a `SymPy <https://www.sympy.org>`_
``Circle`` object. Let's assume we have created and populated a table
with two circles, like so:

.. code-block:: sql

    CREATE TABLE circle (
        name varchar(8) primary key,
        circle circle);
    INSERT INTO circle VALUES ('C1', '<(2, 3), 3>');
    INSERT INTO circle VALUES ('C2', '<(1, -1), 4>');

With PostgreSQL we can easily calculate that these two circles overlap::

    >>> con.cursor().execute("""SELECT c1.circle && c2.circle
    ...     FROM circle c1, circle c2
    ...     WHERE c1.name = 'C1' AND c2.name = 'C2'""").fetchone()[0]
    True

However, calculating the intersection points between the two circles
using the ``#`` operator does not work (at least not as of PostgreSQL
version 9.5). So let's resort to SymPy to find out. To ease importing
circles from PostgreSQL to SymPy, we create and register the following
typecast function::

    >>> from sympy import Point, Circle
    >>>
    >>> def cast_circle(s):
    ...     p, r = s[1:-1].rsplit(',', 1)
    ...     p = p[1:-1].split(',')
    ...     return Circle(Point(float(p[0]), float(p[1])), float(r))
    ...
    >>> pgdb.set_typecast('circle', cast_circle)

Now we can import the circles in the table into Python quite easily::

    >>> circle = {c.name: c.circle for c in con.cursor().execute(
    ...     "SELECT * FROM circle").fetchall()}

The result is a dictionary mapping circle names to SymPy ``Circle``
objects.
We can verify that the circles have been imported correctly:

    >>> circle
    {'C1': Circle(Point(2, 3), 3.0), 'C2': Circle(Point(1, -1), 4.0)}

Finally we can find the exact intersection points with SymPy:

    >>> circle['C1'].intersection(circle['C2'])
    [Point(29/17 + 64564173230121*sqrt(17)/100000000000000,
        -80705216537651*sqrt(17)/500000000000000 + 31/17),
     Point(-64564173230121*sqrt(17)/100000000000000 + 29/17,
        80705216537651*sqrt(17)/500000000000000 + 31/17)]

Connection -- The connection object
===================================

.. py:currentmodule:: pgdb

.. class:: Connection

These connection objects respond to the following methods.

Note that ``pgdb.Connection`` objects also implement the context manager
protocol, i.e. you can use them in a ``with`` statement. When the ``with``
block ends, the current transaction will be automatically committed or
rolled back if there was an exception, and you won't need to do this
manually.

close -- close the connection
-----------------------------

.. method:: Connection.close()

    Close the connection now (rather than whenever it is deleted)

    :rtype: None

The connection will be unusable from this point forward; an :exc:`Error`
(or subclass) exception will be raised if any operation is attempted with
the connection. The same applies to all cursor objects trying to use the
connection. Note that closing a connection without committing the changes
first will cause an implicit rollback to be performed.

commit -- commit the connection
-------------------------------

.. method:: Connection.commit()

    Commit any pending transaction to the database

    :rtype: None

Note that connections always use a transaction, unless you set the
:attr:`Connection.autocommit` attribute described below.

rollback -- roll back the connection
------------------------------------

.. method:: Connection.rollback()

    Roll back any pending transaction to the database

    :rtype: None

This method causes the database to roll back to the start of any pending
transaction. Closing a connection without committing the changes first
will cause an implicit rollback to be performed.

cursor -- return a new cursor object
------------------------------------

.. method:: Connection.cursor()

    Return a new cursor object using the connection

    :returns: a cursor object
    :rtype: :class:`Cursor`

This method returns a new :class:`Cursor` object that can be used to
operate on the database in the way described in the next section.

Attributes that are not part of the standard
--------------------------------------------

.. note::

    The following attributes are not part of the DB-API 2 standard.

.. attribute:: Connection.closed

    This is *True* if the connection has been closed or has become invalid

.. attribute:: Connection.cursor_type

    The default cursor type used by the connection

If you want to use your own custom subclass of the :class:`Cursor` class
with the connection, set this attribute to your custom cursor class. You
will then get your custom cursor whenever you call
:meth:`Connection.cursor`.

.. versionadded:: 5.0

.. attribute:: Connection.type_cache

    A dictionary with the various type codes for the PostgreSQL types

This can be used for getting more information on the PostgreSQL database
types or changing the typecast functions used for the connection. See the
description of the :class:`TypeCache` class for details.

.. versionadded:: 5.0
.. attribute:: Connection.autocommit

    A read/write attribute to get/set the autocommit mode

Normally, all DB-API 2 SQL commands are run inside a transaction.
Sometimes this behavior is not desired; there are also some SQL commands
such as VACUUM which cannot be run inside a transaction.

By setting this attribute to ``True`` you can change this behavior so that
no transactions will be started for that connection. In this case every
executed SQL command has immediate effect on the database and you don't
need to call :meth:`Connection.commit` explicitly. In this mode, you can
still use ``with con:`` blocks to run parts of the code using the
connection ``con`` inside a transaction.

By default, this attribute is set to ``False`` which conforms to the
behavior specified by the DB-API 2 standard (manual commit required).

.. versionadded:: 5.1
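As an illustration of both modes, here is a minimal sketch; the database
name and the table are only placeholders::

    import pgdb

    con = pgdb.connect(database='testdb')  # placeholder connection

    # Transactional mode (the default): committed when the block ends.
    with con:
        cur = con.cursor()
        cur.execute("CREATE TABLE tmp_demo (n integer)")  # made-up table

    # Autocommit mode: every statement takes effect immediately.
    con.autocommit = True
    con.cursor().execute("VACUUM")  # VACUUM cannot run in a transaction
    con.autocommit = False

    con.close()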
rowcount -- number of rows of the result ---------------------------------------- .. attribute:: Cursor.rowcount This read-only attribute specifies the number of rows that the last :meth:`Cursor.execute` or :meth:`Cursor.executemany` call produced (for DQL statements like SELECT) or affected (for DML statements like UPDATE or INSERT). It is also set by the :meth:`Cursor.copy_from` and :meth:`Cursor.copy_to` methods. The attribute is -1 in case no such method call has been performed on the cursor or the rowcount of the last operation cannot be determined by the interface. close -- close the cursor ------------------------- .. method:: Cursor.close() Close the cursor now (rather than whenever it is deleted) :rtype: None The cursor will be unusable from this point forward; an :exc:`Error` (or subclass) exception will be raised if any operation is attempted with the cursor. execute -- execute a database operation --------------------------------------- .. method:: Cursor.execute(operation, [parameters]) Prepare and execute a database operation (query or command) :param str operation: the database operation :param parameters: a sequence or mapping of parameters :returns: the cursor, so you can chain commands Parameters may be provided as a sequence or mapping and will be bound to variables in the operation. Variables are specified using Python extended format codes, e.g. ``" ... WHERE name=%(name)s"``. A reference to the operation will be retained by the cursor. If the same operation object is passed in again, then the cursor can optimize its behavior. This is most effective for algorithms where the same operation is used, but different parameters are bound to it (many times). The parameters may also be specified as a list of tuples to e.g. insert multiple rows in a single operation, but this kind of usage is deprecated: :meth:`Cursor.executemany` should be used instead. Note that in case this method raises a :exc:`DatabaseError`, you can get information about the error condition that has occurred by introspecting its :attr:`DatabaseError.sqlstate` attribute, which will be the ``SQLSTATE`` error code associated with the error. Applications that need to know which error condition has occurred should usually test the error code, rather than looking at the textual error message. executemany -- execute many similar database operations ------------------------------------------------------- .. method:: Cursor.executemany(operation, [seq_of_parameters]) Prepare and execute many similar database operations (queries or commands) :param str operation: the database operation :param seq_of_parameters: a sequence or mapping of parameter tuples or mappings :returns: the cursor, so you can chain commands Prepare a database operation (query or command) and then execute it against all parameter tuples or mappings found in the sequence *seq_of_parameters*. Parameters are bound to the query using Python extended format codes, e.g. ``" ... WHERE name=%(name)s"``. callproc -- Call a stored procedure ----------------------------------- .. method:: Cursor.callproc(procname, [parameters]) Call a stored database procedure with the given name :param str procname: the name of the database function :param parameters: a sequence of parameters (can be empty or omitted) This method calls a stored procedure (function) in the PostgreSQL database. The sequence of parameters must contain one entry for each input argument that the function expects. The result of the call is the same as this input sequence; replacement of output and input/output parameters in the return value is currently not supported. The function may also provide a result set as output. These can be requested through the standard fetch methods of the cursor. .. versionadded:: 5.0 fetchone -- fetch next row of the query result ---------------------------------------------- .. method:: Cursor.fetchone() Fetch the next row of a query result set :returns: the next row of the query result set :rtype: named tuple or None Fetch the next row of a query result set, returning a single named tuple, or ``None`` when no more data is available. The field names of the named tuple are the same as the column names of the database query as long as they are valid Python identifiers. An :exc:`Error` (or subclass) exception is raised if the previous call to :meth:`Cursor.execute` or :meth:`Cursor.executemany` did not produce any result set or no call was issued yet. .. versionchanged:: 5.0 Before version 5.0, this method returned ordinary tuples.
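As a small usage sketch (assuming a cursor ``cur`` and the ``fruits`` table from the tutorial), a result set can be consumed row by row like this::

    >>> cur.execute('select name from fruits')
    >>> while True:
    ...     row = cur.fetchone()
    ...     if row is None:  # no more rows available
    ...         break
    ...     print(row.name)
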
fetchmany -- fetch next set of rows of the query result ------------------------------------------------------- .. method:: Cursor.fetchmany([size=None], [keep=False]) Fetch the next set of rows of a query result :param size: the number of rows to be fetched :type size: int or None :param keep: if set to true, will keep the passed arraysize :type keep: bool :returns: the next set of rows of the query result :rtype: list of named tuples Fetch the next set of rows of a query result, returning a list of named tuples. An empty sequence is returned when no more rows are available. The field names of the named tuple are the same as the column names of the database query as long as they are valid Python identifiers. The number of rows to fetch per call is specified by the *size* parameter. If it is not given, the cursor's :attr:`arraysize` determines the number of rows to be fetched. If you set the *keep* parameter to true, this is kept as the new :attr:`arraysize`. The method tries to fetch as many rows as indicated by the *size* parameter. If this is not possible due to the specified number of rows not being available, fewer rows may be returned. An :exc:`Error` (or subclass) exception is raised if the previous call to :meth:`Cursor.execute` or :meth:`Cursor.executemany` did not produce any result set or no call was issued yet. Note there are performance considerations involved with the *size* parameter. For optimal performance, it is usually best to use the :attr:`arraysize` attribute. If the *size* parameter is used, then it is best for it to retain the same value from one :meth:`Cursor.fetchmany` call to the next. .. versionchanged:: 5.0 Before version 5.0, this method returned ordinary tuples. fetchall -- fetch all rows of the query result ---------------------------------------------- .. method:: Cursor.fetchall() Fetch all (remaining) rows of a query result :returns: the set of all rows of the query result :rtype: list of named tuples Fetch all (remaining) rows of a query result, returning them as a list of named tuples. The field names of the named tuple are the same as the column names of the database query as long as they are valid as field names for named tuples, otherwise they are given positional names. Note that the cursor's :attr:`arraysize` attribute can affect the performance of this operation. .. versionchanged:: 5.0 Before version 5.0, this method returned ordinary tuples. arraysize - the number of rows to fetch at a time ------------------------------------------------- .. attribute:: Cursor.arraysize The number of rows to fetch at a time This read/write attribute specifies the number of rows to fetch at a time with :meth:`Cursor.fetchmany`. It defaults to 1, meaning to fetch a single row at a time.
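A brief sketch of batch-wise fetching (assuming a cursor ``cur`` with a pending result set; ``process`` is a hypothetical handler for one batch of rows)::

    >>> cur.arraysize = 100
    >>> while True:
    ...     rows = cur.fetchmany()  # fetches 100 rows at a time
    ...     if not rows:
    ...         break
    ...     process(rows)
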
Methods and attributes that are not part of the standard -------------------------------------------------------- .. note:: The following methods and attributes are not part of the DB-API 2 standard. .. method:: Cursor.copy_from(stream, table, [format], [sep], [null], [size], [columns]) Copy data from an input stream to the specified table :param stream: the input stream (must be a file-like object, a string or an iterable returning strings) :param str table: the name of a database table :param str format: the format of the data in the input stream, can be ``'text'`` (the default), ``'csv'``, or ``'binary'`` :param str sep: a single character separator (the default is ``'\t'`` for text and ``','`` for csv) :param str null: the textual representation of the ``NULL`` value, can also be an empty string (the default is ``'\\N'``) :param int size: the size of the buffer when reading file-like objects :param list columns: an optional list of column names :returns: the cursor, so you can chain commands :raises TypeError: parameters with wrong types :raises ValueError: invalid parameters :raises IOError: error when executing the copy operation This method can be used to copy data from an input stream on the client side to a database table on the server side using the ``COPY FROM`` command. The input stream can be provided in the form of a file-like object (which must have a ``read()`` method), a string, or an iterable returning one row or multiple rows of input data on each iteration. The format must be text, csv or binary. The sep option sets the column separator (delimiter) used in the non binary formats. The null option sets the textual representation of ``NULL`` in the input. The size option sets the size of the buffer used when reading data from file-like objects. The copy operation can be restricted to a subset of columns. If no columns are specified, all of them will be copied. .. versionadded:: 5.0 .. method:: Cursor.copy_to(stream, table, [format], [sep], [null], [decode], [columns]) Copy data from the specified table to an output stream :param stream: the output stream (must be a file-like object or ``None``) :param str table: the name of a database table or a ``SELECT`` query :param str format: the format of the data in the output stream, can be ``'text'`` (the default), ``'csv'``, or ``'binary'`` :param str sep: a single character separator (the default is ``'\t'`` for text and ``','`` for csv) :param str null: the textual representation of the ``NULL`` value, can also be an empty string (the default is ``'\\N'``) :param bool decode: whether decoded strings shall be returned for non-binary formats (the default is True in Python 3) :param list columns: an optional list of column names :returns: a generator if stream is set to ``None``, otherwise the cursor :raises TypeError: parameters with wrong types :raises ValueError: invalid parameters :raises IOError: error when executing the copy operation This method can be used to copy data from a database table on the server side to an output stream on the client side using the ``COPY TO`` command. The output stream can be provided in the form of a file-like object (which must have a ``write()`` method). Alternatively, if ``None`` is passed as the output stream, the method will return a generator yielding one row of output data on each iteration. Output will be returned as byte strings unless you set decode to true. Note that you can also use a ``SELECT`` query instead of the table name. The format must be text, csv or binary. The sep option sets the column separator (delimiter) used in the non binary formats. The null option sets the textual representation of ``NULL`` in the output. The copy operation can be restricted to a subset of columns. If no columns are specified, all of them will be copied. .. versionadded:: 5.0
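As an illustration (a hedged sketch: ``cur`` is a cursor and ``fruits`` the table from the tutorial), the generator form of :meth:`Cursor.copy_to` dumps a table without needing a file object::

    >>> for line in cur.copy_to(None, 'fruits', format='csv', decode=True):
    ...     print(line, end='')  # each iteration yields one row of CSV data
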
.. method:: Cursor.row_factory(row) Process rows before they are returned :param list row: the currently processed row of the result set :returns: the transformed row that the fetch methods shall return This method is used for processing result rows before returning them through one of the fetch methods. By default, rows are returned as named tuples. You can overwrite this method with a custom row factory if you want to return the rows as different kinds of objects. This same row factory will then be used for all result sets. If you overwrite this method, the method :meth:`Cursor.build_row_factory` for creating row factories dynamically will be ignored. Note that named tuples are very efficient and can be easily converted to dicts (even OrderedDicts) by calling ``row._asdict()``. If you still want to return rows as dicts, you can create a custom cursor class like this:: class DictCursor(pgdb.Cursor): def row_factory(self, row): return {key: value for key, value in zip(self.colnames, row)} cur = DictCursor(con) # get one DictCursor instance or con.cursor_type = DictCursor # always use DictCursor instances .. versionadded:: 4.0 .. method:: Cursor.build_row_factory() Build a row factory based on the current description :returns: callable with the signature of :meth:`Cursor.row_factory` This method returns row factories for creating named tuples. It is called whenever a new result set is created, and :attr:`Cursor.row_factory` is then assigned the return value of this method. You can overwrite this method with a custom row factory builder if you want to use different row factories for different result sets. Otherwise, you can also simply overwrite the :meth:`Cursor.row_factory` method. This method will then be ignored. The default implementation that delivers rows as named tuples essentially looks like this:: def build_row_factory(self): return namedtuple('Row', self.colnames, rename=True)._make .. versionadded:: 5.0 .. attribute:: Cursor.colnames The list of column names of the current result set The values in this list are the same values as the *name* elements in the :attr:`Cursor.description` attribute. Always use the latter if you want to remain standard compliant. .. versionadded:: 5.0 .. attribute:: Cursor.coltypes The list of column types of the current result set The values in this list are the same values as the *type_code* elements in the :attr:`Cursor.description` attribute. Always use the latter if you want to remain standard compliant. .. versionadded:: 5.0 pygresql-5.1.2/docs/contents/pgdb/index.rst000066400000000000000000000004351365010227600207330ustar00rootroot00000000000000---------------------------------------------- :mod:`pgdb` --- The DB-API Compliant Interface ---------------------------------------------- .. module:: pgdb Contents ======== .. toctree:: introduction module connection cursor types typecache adaptation pygresql-5.1.2/docs/contents/pgdb/introduction.rst000066400000000000000000000013711365010227600223450ustar00rootroot00000000000000Introduction ============ You may either choose to use the "classic" PyGreSQL interface provided by the :mod:`pg` module or else the newer DB-API 2.0 compliant interface provided by the :mod:`pgdb` module. The following part of the documentation covers only the newer :mod:`pgdb` API.
**DB-API 2.0** (Python Database API Specification v2.0) is a specification for connecting to databases (not only PostgreSQL) from Python that has been developed by the Python DB-SIG in 1999. The authoritative programming information for the DB-API is :pep:`0249`. .. seealso:: A useful tutorial-like `introduction to the DB-API `_ has been written by Andrew M. Kuchling for the LINUX Journal in 1998. pygresql-5.1.2/docs/contents/pgdb/module.rst000066400000000000000000000157231365010227600211170ustar00rootroot00000000000000Module functions and constants ============================== .. py:currentmodule:: pgdb The :mod:`pgdb` module defines a :func:`connect` function that allows you to connect to a database, some global constants describing the capabilities of the module as well as several exception classes. connect -- Open a PostgreSQL connection --------------------------------------- .. function:: connect([dsn], [user], [password], [host], [database], [**kwargs]) Return a new connection to the database :param str dsn: data source name as string :param str user: the database user name :param str password: the database password :param str host: the hostname of the database :param str database: the name of the database :param dict kwargs: other connection parameters :returns: a connection object :rtype: :class:`Connection` :raises pgdb.OperationalError: error connecting to the database This function takes parameters specifying how to connect to a PostgreSQL database and returns a :class:`Connection` object using these parameters. If specified, the *dsn* parameter must be a string with the format ``'host:base:user:passwd:opt'``. All of the parts specified in the *dsn* are optional. You can also specify the parameters individually using keyword arguments, which always take precedence. The *host* can also contain a port if specified in the format ``'host:port'``. In the *opt* part of the *dsn* you can pass command-line options to the server. You can pass additional connection parameters using the optional *kwargs* keyword arguments. Example:: con = connect(dsn='myhost:mydb', user='guido', password='234$') .. versionchanged:: 5.0.1 Support for additional parameters passed as *kwargs*. get/set/reset_typecast -- Control the global typecast functions --------------------------------------------------------------- PyGreSQL uses typecast functions to cast the raw data coming from the database to Python objects suitable for the particular database type. These functions take a single string argument that represents the data to be casted and must return the casted value. PyGreSQL provides built-in typecast functions for the common database types, but if you want to change these or add more typecast functions, you can set these up using the following functions. .. note:: The following functions are not part of the DB-API 2 standard. .. method:: get_typecast(typ) Get the global cast function for the given database type :param str typ: PostgreSQL type name or type code :returns: the typecast function for the specified type :rtype: function or None .. versionadded:: 5.0 .. method:: set_typecast(typ, cast) Set a global typecast function for the given database type(s) :param typ: PostgreSQL type name or type code, or list of such :type typ: str or list :param cast: the typecast function to be set for the specified type(s) :type cast: callable The typecast function must take one string object as argument and return a Python object into which the PostgreSQL type shall be casted. If the function takes another parameter named *connection*, then the current database connection will also be passed to the typecast function. This may sometimes be necessary to look up certain database settings. .. versionadded:: 5.0 As of version 5.0.3 you can also use this method to change the typecasting of PostgreSQL array types. You must run ``set_typecast('anyarray', cast)`` in order to do this. The ``cast`` function must take a string value and a cast function for the base type and return the array converted to a Python object. For instance, run ``set_typecast('anyarray', lambda v, c: v)`` to switch off the casting of arrays completely, and always return them encoded as strings.
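For instance (a minimal sketch: casting ``numeric`` values to ``float`` instead of the default ``Decimal``), a global typecast function could be registered like this::

    >>> import pgdb
    >>> pgdb.set_typecast('numeric', float)  # numeric values now come back as floats
    >>> pgdb.reset_typecast('numeric')       # restore the built-in default
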
.. method:: reset_typecast([typ]) Reset the typecasts for the specified (or all) type(s) to their defaults :param str typ: PostgreSQL type name or type code, or list of such, or None to reset all typecast functions :type typ: str, list or None .. versionadded:: 5.0 Note that database connections cache types and their cast functions using connection specific :class:`TypeCache` objects. You can also get, set and reset typecast functions on the connection level using the methods :meth:`TypeCache.get_typecast`, :meth:`TypeCache.set_typecast` and :meth:`TypeCache.reset_typecast` of the :attr:`Connection.type_cache`. This will not affect other connections or future connections. In order to be sure a global change is picked up by a running connection, you must reopen it or call :meth:`TypeCache.reset_typecast` on the :attr:`Connection.type_cache`. Module constants ---------------- .. data:: apilevel The string constant ``'2.0'``, stating that the module is DB-API 2.0 level compliant. .. data:: threadsafety The integer constant 1, stating that the module itself is thread-safe, but the connections are not thread-safe, and therefore must be protected with a lock if you want to use them from different threads. .. data:: paramstyle The string constant ``'pyformat'``, stating that parameters should be passed using Python extended format codes, e.g. ``" ... WHERE name=%(name)s"``. Errors raised by this module ---------------------------- The errors that can be raised by the :mod:`pgdb` module are the following: .. exception:: Warning Exception raised for important warnings like data truncations while inserting. .. exception:: Error Exception that is the base class of all other error exceptions. You can use this to catch all errors with one single except statement. Warnings are not considered errors and thus do not use this class as base. .. exception:: InterfaceError Exception raised for errors that are related to the database interface rather than the database itself. .. exception:: DatabaseError Exception raised for errors that are related to the database. In PyGreSQL, this also has a :attr:`DatabaseError.sqlstate` attribute that contains the ``SQLSTATE`` error code of this error. .. exception:: DataError Exception raised for errors that are due to problems with the processed data like division by zero or numeric value out of range. .. exception:: OperationalError Exception raised for errors that are related to the database's operation and not necessarily under the control of the programmer, e.g. an unexpected disconnect occurs, the data source name is not found, a transaction could not be processed, or a memory allocation error occurred during processing. .. exception:: IntegrityError Exception raised when the relational integrity of the database is affected, e.g. a foreign key check fails. ..
exception:: ProgrammingError Exception raised for programming errors, e.g. table not found or already exists, syntax error in the SQL statement or wrong number of parameters specified. .. exception:: NotSupportedError Exception raised in case a method or database API was used which is not supported by the database. pygresql-5.1.2/docs/contents/pgdb/typecache.rst000066400000000000000000000066531365010227600215760ustar00rootroot00000000000000TypeCache -- The internal cache for database types ================================================== .. py:currentmodule:: pgdb .. class:: TypeCache .. versionadded:: 5.0 The internal :class:`TypeCache` of PyGreSQL is not part of the DB-API 2 standard, but is documented here in case you need full control and understanding of the internal handling of database types. The TypeCache is essentially a dictionary mapping PostgreSQL internal type names and type OIDs to DB-API 2 "type codes" (which are also returned as the *type_code* field of the :attr:`Cursor.description` attribute). These type codes are strings which are equal to the PostgreSQL internal type name, but they also carry additional information about the associated PostgreSQL type in the following attributes: - *oid* -- the OID of the type - *len* -- the internal size - *type* -- ``'b'`` = base, ``'c'`` = composite, ... - *category* -- ``'A'`` = Array, ``'B'`` = Boolean, ... - *delim* -- delimiter to be used when parsing arrays - *relid* -- the table OID for composite types For details, see the PostgreSQL documentation on `pg_type `_. In addition to the dictionary methods, the :class:`TypeCache` provides the following methods: .. method:: TypeCache.get_fields(typ) Get the names and types of the fields of composite types :param typ: PostgreSQL type name or OID of a composite type :type typ: str or int :returns: a list of pairs of field names and types :rtype: list .. method:: TypeCache.get_typecast(typ) Get the cast function for the given database type :param str typ: PostgreSQL type name or type code :returns: the typecast function for the specified type :rtype: function or None .. method:: TypeCache.set_typecast(typ, cast) Set a typecast function for the given database type(s) :param typ: PostgreSQL type name or type code, or list of such :type typ: str or list :param cast: the typecast function to be set for the specified type(s) :type cast: callable The typecast function must take one string object as argument and return a Python object into which the PostgreSQL type shall be casted. If the function takes another parameter named *connection*, then the current database connection will also be passed to the typecast function. This may sometimes be necessary to look up certain database settings. .. method:: TypeCache.reset_typecast([typ]) Reset the typecasts for the specified (or all) type(s) to their defaults :param str typ: PostgreSQL type name or type code, or list of such, or None to reset all typecast functions :type typ: str, list or None .. method:: TypeCache.typecast(value, typ) Cast the given value according to the given database type :param str typ: PostgreSQL type name or type code :returns: the casted value .. note:: Note that the :class:`TypeCache` is always bound to a database connection. You can also get, set and reset typecast functions on a global level using the functions :func:`pgdb.get_typecast`, :func:`pgdb.set_typecast` and :func:`pgdb.reset_typecast`. If you do this, the current database connections will continue to use their already cached typecast functions unless you call the :meth:`TypeCache.reset_typecast` method on the :attr:`Connection.type_cache` objects of the running connections.
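As a short sketch (assuming an open connection ``con``), a typecast for just that connection goes through its type cache::

    >>> con.type_cache.set_typecast('numeric', float)
    >>> con.type_cache.typecast('3.14', 'numeric')  # cast a raw value by hand
    3.14
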
pygresql-5.1.2/docs/contents/pgdb/types.rst000066400000000000000000000131151365010227600207670ustar00rootroot00000000000000Type -- Type objects and constructors ===================================== .. py:currentmodule:: pgdb .. _type_constructors: Type constructors ----------------- For binding to an operation's input parameters, PostgreSQL needs to have the input in a particular format. However, from the parameters to the :meth:`Cursor.execute` and :meth:`Cursor.executemany` methods it is not always obvious which PostgreSQL data types they shall be bound as. For instance, a Python string could be bound as a simple ``char`` value, or also as a ``date`` or a ``time``. Or a list could be bound as an ``array`` or a ``json`` object. To make the intention clear in such cases, you can wrap the parameters in type helper objects. PyGreSQL provides the constructors defined below to create such objects that can hold special values. When passed to the cursor methods, PyGreSQL can then detect the proper type of the input parameter and bind it accordingly. The :mod:`pgdb` module exports the following type constructors as part of the DB-API 2 standard: .. function:: Date(year, month, day) Construct an object holding a date value .. function:: Time(hour, [minute], [second], [microsecond], [tzinfo]) Construct an object holding a time value .. function:: Timestamp(year, month, day, [hour], [minute], [second], [microsecond], [tzinfo]) Construct an object holding a time stamp value .. function:: DateFromTicks(ticks) Construct an object holding a date value from the given *ticks* value .. function:: TimeFromTicks(ticks) Construct an object holding a time value from the given *ticks* value .. function:: TimestampFromTicks(ticks) Construct an object holding a time stamp from the given *ticks* value .. function:: Binary(bytes) Construct an object capable of holding a (long) binary string value Additionally, PyGreSQL provides the following constructors for PostgreSQL specific data types: .. function:: Interval(days, hours=0, minutes=0, seconds=0, microseconds=0) Construct an object holding a time interval value .. versionadded:: 5.0 .. function:: Uuid([hex], [bytes], [bytes_le], [fields], [int], [version]) Construct an object holding a UUID value .. versionadded:: 5.0 .. function:: Hstore(dict) Construct a wrapper for holding an hstore dictionary .. versionadded:: 5.0 .. function:: Json(obj, [encode]) Construct a wrapper for holding an object serializable to JSON You can pass an optional serialization function as a parameter. By default, PyGreSQL uses :func:`json.dumps` to serialize it. .. function:: Literal(sql) Construct a wrapper for holding a literal SQL string .. versionadded:: 5.0 Example for using a type constructor:: >>> cursor.execute("create table jsondata (data jsonb)") >>> data = {'id': 1, 'name': 'John Doe', 'kids': ['Johnnie', 'Janie']} >>> cursor.execute("insert into jsondata values (%s)", [Json(data)]) .. note:: SQL ``NULL`` values are always represented by the Python *None* singleton on input and output. .. _type_objects: Type objects ------------ .. class:: Type The :attr:`Cursor.description` attribute returns information about each of the result columns of a query. The *type_code* must compare equal to one of the :class:`Type` objects defined below.
Type objects can be equal to more than one type code (e.g. :class:`DATETIME` is equal to the type codes for ``date``, ``time`` and ``timestamp`` columns). The :mod:`pgdb` module exports the following :class:`Type` objects as part of the DB-API 2 standard: .. object:: STRING Used to describe columns that are string-based (e.g. ``char``, ``varchar``, ``text``) .. object:: BINARY Used to describe (long) binary columns (``bytea``) .. object:: NUMBER Used to describe numeric columns (e.g. ``int``, ``float``, ``numeric``, ``money``) .. object:: DATETIME Used to describe date/time columns (e.g. ``date``, ``time``, ``timestamp``, ``interval``) .. object:: ROWID Used to describe the ``oid`` column of PostgreSQL database tables .. note:: The following more specific type objects are not part of the DB-API 2 standard. .. object:: BOOL Used to describe ``boolean`` columns .. object:: SMALLINT Used to describe ``smallint`` columns .. object:: INTEGER Used to describe ``integer`` columns .. object:: LONG Used to describe ``bigint`` columns .. object:: FLOAT Used to describe ``float`` columns .. object:: NUMERIC Used to describe ``numeric`` columns .. object:: MONEY Used to describe ``money`` columns .. object:: DATE Used to describe ``date`` columns .. object:: TIME Used to describe ``time`` columns .. object:: TIMESTAMP Used to describe ``timestamp`` columns .. object:: INTERVAL Used to describe date and time ``interval`` columns .. object:: UUID Used to describe ``uuid`` columns .. object:: HSTORE Used to describe ``hstore`` columns .. versionadded:: 5.0 .. object:: JSON Used to describe ``json`` and ``jsonb`` columns .. versionadded:: 5.0 .. object:: ARRAY Used to describe columns containing PostgreSQL arrays .. versionadded:: 5.0 .. object:: RECORD Used to describe columns containing PostgreSQL records .. versionadded:: 5.0 Example for using some type objects:: >>> cursor = con.cursor() >>> cursor.execute("create table jsondata (created date, data jsonb)") >>> cursor.execute("select * from jsondata") >>> (created, data) = (d.type_code for d in cursor.description) >>> created == DATE True >>> created == DATETIME True >>> created == TIME False >>> data == JSON True >>> data == STRING False pygresql-5.1.2/docs/contents/postgres/000077500000000000000000000000001365010227600200225ustar00rootroot00000000000000pygresql-5.1.2/docs/contents/postgres/advanced.rst000066400000000000000000000112061365010227600223210ustar00rootroot00000000000000Examples for advanced features ============================== .. py:currentmodule:: pg In this section, we show how to use some advanced features of PostgreSQL using the classic PyGreSQL interface. We assume that you have already created a connection to the PostgreSQL database, as explained in the :doc:`basic`:: >>> from pg import DB >>> db = DB() >>> query = db.query Inheritance ----------- A table can inherit from zero or more tables. A query can reference either all rows of a table or all rows of a table plus all of its descendants. For example, the capitals table inherits from the cities table (it inherits all data fields from cities):: >>> query("""CREATE TABLE cities ( ...     name text, ...     population float8, ...     altitude int)""") >>> query("""CREATE TABLE capitals ( ...     state varchar(2)) INHERITS (cities)""") Now, let's populate the tables:: >>> data = [('cities', [ ...     "'San Francisco', 7.24E+5, 63", ...     "'Las Vegas', 2.583E+5, 2174", ...     "'Mariposa', 1200, 1953"]), ...     ('capitals', [ ...     "'Sacramento',3.694E+5,30,'CA'", ...     "'Madison', 1.913E+5, 845, 'WI'"])] >>> for table, rows in data: ...     for row in rows: ...         query("INSERT INTO %s VALUES (%s)" % (table, row)) >>> print(query("SELECT * FROM cities")) name |population|altitude -------------+----------+-------- San Francisco| 724000| 63 Las Vegas | 258300| 2174 Mariposa | 1200| 1953 Sacramento | 369400| 30 Madison | 191300| 845 (5 rows) >>> print(query("SELECT * FROM capitals")) name |population|altitude|state ----------+----------+--------+----- Sacramento| 369400| 30|CA Madison | 191300| 845|WI (2 rows) You can find all cities, including capitals, that are located at an altitude of 500 feet or higher by:: >>> print(query("""SELECT name, altitude ... FROM cities ... WHERE altitude > 500""")) name |altitude ---------+-------- Las Vegas| 2174 Mariposa | 1953 Madison | 845 (3 rows) On the other hand, the following query references rows of the base table only, i.e. it finds all cities that are not state capitals and are situated at an altitude of 500 feet or higher:: >>> print(query("""SELECT name, altitude ... FROM ONLY cities ... WHERE altitude > 500""")) name |altitude ---------+-------- Las Vegas| 2174 Mariposa | 1953 (2 rows) Arrays ------ Attributes can be arrays of base types or user-defined types:: >>> query("""CREATE TABLE sal_emp ( ... name text, ... pay_by_quarter int4[], ... pay_by_extra_quarter int8[], ... schedule text[][])""") Insert instances with array attributes. Note the use of braces:: >>> query("""INSERT INTO sal_emp VALUES ( ... 'Bill', '{10000,10000,10000,10000}', ... '{9223372036854775800,9223372036854775800,9223372036854775800}', ... '{{"meeting", "lunch"}, {"training", "presentation"}}')""") >>> query("""INSERT INTO sal_emp VALUES ( ... 'Carol', '{20000,25000,25000,25000}', ... '{9223372036854775807,9223372036854775807,9223372036854775807}', ... '{{"breakfast", "consulting"}, {"meeting", "lunch"}}')""") Queries on array attributes:: >>> query("""SELECT name FROM sal_emp WHERE ... sal_emp.pay_by_quarter[1] != sal_emp.pay_by_quarter[2]""") name ----- Carol (1 row) Retrieve third quarter pay of all employees:: >>> query("SELECT sal_emp.pay_by_quarter[3] FROM sal_emp") pay_by_quarter -------------- 10000 25000 (2 rows) Retrieve third quarter extra pay of all employees:: >>> query("SELECT sal_emp.pay_by_extra_quarter[3] FROM sal_emp") pay_by_extra_quarter -------------------- 9223372036854775800 9223372036854775807 (2 rows) Retrieve first two quarters of extra quarter pay of all employees:: >>> query("SELECT sal_emp.pay_by_extra_quarter[1:2] FROM sal_emp") pay_by_extra_quarter ----------------------------------------- {9223372036854775800,9223372036854775800} {9223372036854775807,9223372036854775807} (2 rows) Select subarrays:: >>> query("""SELECT sal_emp.schedule[1:2][1:1] FROM sal_emp ... WHERE sal_emp.name = 'Bill'""") schedule ---------------------- {{meeting},{training}} (1 row) pygresql-5.1.2/docs/contents/postgres/basic.rst000066400000000000000000000302001365010227600216300ustar00rootroot00000000000000Basic examples ============== .. py:currentmodule:: pg In this section, we demonstrate how to use some of the very basic features of PostgreSQL using the classic PyGreSQL interface.
Creating a connection to the database ------------------------------------- We start by creating a **connection** to the PostgreSQL database:: >>> from pg import DB >>> db = DB() If you pass no parameters when creating the :class:`DB` instance, then PyGreSQL will try to connect to the database on the local host that has the same name as the current user, and also use that name for login. You can also pass the database name, host, port and login information as parameters when creating the :class:`DB` instance:: >>> db = DB(dbname='testdb', host='pgserver', port=5432, ... user='scott', passwd='tiger') The :class:`DB` class of which ``db`` is an object is a wrapper around the lower level :class:`Connection` class of the :mod:`pg` module. The most important method of such connection objects is the ``query`` method that allows you to send SQL commands to the database. Creating tables --------------- The first thing you would want to do in an empty database is creating a table. To do this, you need to send a **CREATE TABLE** command to the database. PostgreSQL has its own set of built-in types that can be used for the table columns. Let us create two tables "weather" and "cities":: >>> db.query("""CREATE TABLE weather ( ... city varchar(80), ... temp_lo int, temp_hi int, ... prcp float8, ... date date)""") >>> db.query("""CREATE TABLE cities ( ... name varchar(80), ... location point)""") .. note:: Keywords are case-insensitive but identifiers are case-sensitive. You can get a list of all tables in the database with:: >>> db.get_tables() ['public.cities', 'public.weather'] Insert data ----------- Now we want to fill our tables with data. An **INSERT** statement is used to insert a new row into a table. There are several ways you can specify what columns the data should go to. Let us insert a row into each of these tables. The simplest case is when the list of values corresponds to the order of the columns specified in the CREATE TABLE command:: >>> db.query("""INSERT INTO weather ... VALUES ('San Francisco', 46, 50, 0.25, '11/27/1994')""") >>> db.query("""INSERT INTO cities ... VALUES ('San Francisco', '(-194.0, 53.0)')""") You can also specify the columns to which the values correspond. The columns can be specified in any order. You may also omit any number of columns, such as with unknown precipitation, below:: >>> db.query("""INSERT INTO weather (date, city, temp_hi, temp_lo) ... VALUES ('11/29/1994', 'Hayward', 54, 37)""") If you get errors regarding the format of the date values, your database is probably set to a different date style. In this case you must change the date style like this:: >>> db.query("set datestyle = MDY") Instead of explicitly writing the INSERT statement and sending it to the database with the :meth:`DB.query` method, you can also use the more convenient :meth:`DB.insert` method that does the same under the hood:: >>> db.insert('weather', ... date='11/29/1994', city='Hayward', temp_hi=54, temp_lo=37) And instead of using keyword parameters, you can also pass the values to the :meth:`DB.insert` method in a single Python dictionary. If you have a Python list with many rows that shall be used to fill a database table quickly, you can use the :meth:`DB.inserttable` method.
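For example (a brief sketch; the tuples must match the order of the table's columns)::

    >>> data = [('Berkeley', 45, 57, 0.0, '11/30/1994'),
    ...         ('Oakland', 46, 55, None, '11/30/1994')]
    >>> db.inserttable('weather', data)
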
Retrieving data --------------- After having entered some data into our tables, let's see how we can get the data out again. A **SELECT** statement is used for retrieving data. The basic syntax is: .. code-block:: psql SELECT columns FROM tables WHERE predicates A simple one would be the following query:: >>> q = db.query("SELECT * FROM weather") >>> print(q) city |temp_lo|temp_hi|prcp| date -------------+-------+-------+----+---------- San Francisco| 46| 50|0.25|1994-11-27 Hayward | 37| 54| |1994-11-29 (2 rows) You may also specify expressions in the target list. (The 'AS column' specifies the column name of the result. It is optional.) :: >>> print(db.query("""SELECT city, (temp_hi+temp_lo)/2 AS temp_avg, date ... FROM weather""")) city |temp_avg| date -------------+--------+---------- San Francisco| 48|1994-11-27 Hayward | 45|1994-11-29 (2 rows) If you want to retrieve rows that satisfy a certain condition (i.e. a restriction), specify the condition in a WHERE clause. The following retrieves the weather of San Francisco on rainy days:: >>> print(db.query("""SELECT * FROM weather ... WHERE city = 'San Francisco' AND prcp > 0.0""")) city |temp_lo|temp_hi|prcp| date -------------+-------+-------+----+---------- San Francisco| 46| 50|0.25|1994-11-27 (1 row) Here is a more complicated one. Duplicates are removed when DISTINCT is specified. ORDER BY specifies the column to sort on. (Just to make sure the following won't confuse you, DISTINCT and ORDER BY can be used separately.) :: >>> print(db.query("SELECT DISTINCT city FROM weather ORDER BY city")) city ------------- Hayward San Francisco (2 rows) So far we have only printed the output of a SELECT query. The object that is returned by the query is an instance of the :class:`Query` class that can print itself in the nicely formatted way we saw above. But you can also retrieve the results as a list of tuples, by using the :meth:`Query.getresult` method:: >>> from pprint import pprint >>> q = db.query("SELECT * FROM weather") >>> pprint(q.getresult()) [('San Francisco', 46, 50, 0.25, '1994-11-27'), ('Hayward', 37, 54, None, '1994-11-29')] Here we used pprint to print out the returned list in a nicely formatted way. If you want to retrieve the results as a list of dictionaries instead of tuples, use the :meth:`Query.dictresult` method instead:: >>> pprint(q.dictresult()) [{'city': 'San Francisco', 'date': '1994-11-27', 'prcp': 0.25, 'temp_hi': 50, 'temp_lo': 46}, {'city': 'Hayward', 'date': '1994-11-29', 'prcp': None, 'temp_hi': 54, 'temp_lo': 37}] Finally, you can also retrieve the results as a list of named tuples, using the :meth:`Query.namedresult` method. This can be a good compromise between simple tuples and the more memory-intensive dictionaries: >>> for row in q.namedresult(): ... print(row.city, row.date) ... San Francisco 1994-11-27 Hayward 1994-11-29 If you only want to retrieve a single row of data, you can use the more convenient :meth:`DB.get` method that does the same under the hood:: >>> d = dict(city='Hayward') >>> db.get('weather', d, 'city') >>> pprint(d) {'city': 'Hayward', 'date': '1994-11-29', 'prcp': None, 'temp_hi': 54, 'temp_lo': 37} As you see, the :meth:`DB.get` method returns a dictionary with the column names as keys. In the third parameter you can specify which column should be looked up in the WHERE statement of the SELECT statement that is executed by the :meth:`DB.get` method. You normally don't need it when the table was created with a primary key. Retrieving data into other tables --------------------------------- A SELECT ... INTO statement can be used to retrieve data into another table:: >>> db.query("""SELECT * INTO TEMPORARY TABLE temptab FROM weather ...
WHERE city = 'San Francisco' and prcp > 0.0""") This fills a temporary table "temptab" with a subset of the data in the original "weather" table. It can be listed with:: >>> print(db.query("SELECT * from temptab")) city |temp_lo|temp_hi|prcp| date -------------+-------+-------+----+---------- San Francisco| 46| 50|0.25|1994-11-27 (1 row) Aggregates ---------- Let's try the following query:: >>> print(db.query("SELECT max(temp_lo) FROM weather")) max --- 46 (1 row) You can also use aggregates with the GROUP BY clause:: >>> print(db.query("SELECT city, max(temp_lo) FROM weather GROUP BY city")) city |max -------------+--- Hayward | 37 San Francisco| 46 (2 rows) Joining tables -------------- Queries can access multiple tables at once or access the same table in such a way that multiple instances of the table are being processed at the same time. Suppose we want to find all the records that are in the temperature range of other records. W1 and W2 are aliases for weather. We can use the following query to achieve that:: >>> print(db.query("""SELECT W1.city, W1.temp_lo, W1.temp_hi, ... W2.city, W2.temp_lo, W2.temp_hi FROM weather W1, weather W2 ... WHERE W1.temp_lo < W2.temp_lo and W1.temp_hi > W2.temp_hi""")) city |temp_lo|temp_hi| city |temp_lo|temp_hi -------+-------+-------+-------------+-------+------- Hayward| 37| 54|San Francisco| 46| 50 (1 row) Now let's join two different tables. The following joins the "weather" table and the "cities" table:: >>> print(db.query("""SELECT city, location, prcp, date ... FROM weather, cities ... WHERE name = city""")) city |location |prcp| date -------------+---------+----+---------- San Francisco|(-194,53)|0.25|1994-11-27 (1 row) Since the column names are all different, we don't have to specify the table name. If you want to be clear, you can do the following. They give identical results, of course:: >>> print(db.query("""SELECT w.city, c.location, w.prcp, w.date ... FROM weather w, cities c WHERE c.name = w.city""")) city |location |prcp| date -------------+---------+----+---------- San Francisco|(-194,53)|0.25|1994-11-27 (1 row) Updating data ------------- If you want to change the data that has already been inserted into a database table, you will need the **UPDATE** statement. Suppose you discover the temperature readings are all off by 2 degrees as of Nov 28, you may update the data as follows:: >>> db.query("""UPDATE weather ... SET temp_hi = temp_hi - 2, temp_lo = temp_lo - 2 ... WHERE date > '11/28/1994'""") '1' >>> print(db.query("SELECT * from weather")) city |temp_lo|temp_hi|prcp| date -------------+-------+-------+----+---------- San Francisco| 46| 50|0.25|1994-11-27 Hayward | 35| 52| |1994-11-29 (2 rows) Note that the UPDATE statement returned the string ``'1'``, indicating that exactly one row of data has been affected by the update. If you retrieved one row of data as a dictionary using the :meth:`DB.get` method, then you can also update that row with the :meth:`DB.update` method. Deleting data ------------- To delete rows from a table, a **DELETE** statement can be used. Suppose you are no longer interested in the weather of Hayward, you can do the following to delete those rows from the table:: >>> db.query("DELETE FROM weather WHERE city = 'Hayward'") '1' Again, you get the string ``'1'`` as return value, indicating that exactly one row of data has been deleted. You can also delete all the rows in a table by doing the following.
This is different from DROP TABLE which removes the table itself in addition to removing the rows, as explained in the next section. :: >>> db.query("DELETE FROM weather") '1' >>> print(db.query("SELECT * from weather")) city|temp_lo|temp_hi|prcp|date ----+-------+-------+----+---- (0 rows) Since only one row was left in the table, the DELETE query again returns the string ``'1'``. The SELECT query now gives an empty result. If you retrieved a row of data as a dictionary using the :meth:`DB.get` method, then you can also delete that row with the :meth:`DB.delete` method. Removing the tables ------------------- The **DROP TABLE** command is used to remove tables. After you have done this, you can no longer use those tables:: >>> db.query("DROP TABLE weather, cities") >>> db.query("select * from weather") pg.ProgrammingError: Error: Relation "weather" does not exist pygresql-5.1.2/docs/contents/postgres/func.rst000066400000000000000000000115201365010227600215060ustar00rootroot00000000000000Examples for using SQL functions ================================ .. py:currentmodule:: pg We assume that you have already created a connection to the PostgreSQL database, as explained in the :doc:`basic`:: >>> from pg import DB >>> db = DB() >>> query = db.query Creating SQL Functions on Base Types ------------------------------------ A **CREATE FUNCTION** statement lets you create a new function that can be used in expressions (in SELECT, INSERT, etc.). We will start with functions that return values of base types. Let's create a simple SQL function that takes no arguments and returns 1:: >>> query("""CREATE FUNCTION one() RETURNS int4 ... AS 'SELECT 1 as ONE' LANGUAGE SQL""") Functions can be used in any expressions (e.g. in the target list or qualifications):: >>> print(db.query("SELECT one() AS answer")) answer ------ 1 (1 row) Here's how you create a function that takes arguments. The following function returns the sum of its two arguments:: >>> query("""CREATE FUNCTION add_em(int4, int4) RETURNS int4 ... AS $$ SELECT $1 + $2 $$ LANGUAGE SQL""") >>> print(query("SELECT add_em(1, 2) AS answer")) answer ------ 3 (1 row) Creating SQL Functions on Composite Types ----------------------------------------- It is also possible to create functions that return values of composite types. Before we create more sophisticated functions, let's populate an EMP table:: >>> query("""CREATE TABLE EMP ( ... name text, ... salary int4, ... age int4, ... dept varchar(16))""") >>> emps = ["'Sam', 1200, 16, 'toy'", ... "'Claire', 5000, 32, 'shoe'", ... "'Andy', -1000, 2, 'candy'", ... "'Bill', 4200, 36, 'shoe'", ... "'Ginger', 4800, 30, 'candy'"] >>> for emp in emps: ... query("INSERT INTO EMP VALUES (%s)" % emp) Every INSERT statement will return a '1' indicating that it has inserted one row into the EMP table. The argument of a function can also be a tuple. For instance, *double_salary* takes a tuple of the EMP table:: >>> query("""CREATE FUNCTION double_salary(EMP) RETURNS int4 ... AS $$ SELECT $1.salary * 2 AS salary $$ LANGUAGE SQL""") >>> print(query("""SELECT name, double_salary(EMP) AS dream ... FROM EMP WHERE EMP.dept = 'toy'""")) name|dream ----+----- Sam | 2400 (1 row) The return value of a function can also be a tuple. However, make sure that the expressions in the target list are in the same order as the columns of EMP:: >>> query("""CREATE FUNCTION new_emp() RETURNS EMP AS $$ ... SELECT 'None'::text AS name, ... 1000 AS salary, ... 25 AS age, ... 'None'::varchar(16) AS dept ...
$$ LANGUAGE SQL""") You can then extract a column out of the resulting tuple by using the "function notation" for projection columns (i.e. ``bar(foo)`` is equivalent to ``foo.bar``). Note that ``new_emp().name`` isn't supported:: >>> print(query("SELECT name(new_emp()) AS nobody")) nobody ------ None (1 row) Let's try one more function that returns tuples:: >>> query("""CREATE FUNCTION high_pay() RETURNS setof EMP ... AS 'SELECT * FROM EMP where salary > 1500' ... LANGUAGE SQL""") >>> query("SELECT name(high_pay()) AS overpaid") overpaid -------- Claire Bill Ginger (3 rows) Creating SQL Functions with multiple SQL statements --------------------------------------------------- You can also create functions that do more than just a SELECT. You may have noticed that Andy has a negative salary. We'll create a function that removes employees with negative salaries:: >>> query("SELECT * FROM EMP") name |salary|age|dept ------+------+---+----- Sam | 1200| 16|toy Claire| 5000| 32|shoe Andy | -1000| 2|candy Bill | 4200| 36|shoe Ginger| 4800| 30|candy (5 rows) >>> query("""CREATE FUNCTION clean_EMP () RETURNS int4 AS ... 'DELETE FROM EMP WHERE EMP.salary < 0; ... SELECT 1 AS ignore_this' ... LANGUAGE SQL""") >>> query("SELECT clean_EMP()") clean_emp --------- 1 (1 row) >>> query("SELECT * FROM EMP") name |salary|age|dept ------+------+---+----- Sam | 1200| 16|toy Claire| 5000| 32|shoe Bill | 4200| 36|shoe Ginger| 4800| 30|candy (4 rows) Remove functions that were created in this example -------------------------------------------------- We can remove the functions that we have created in this example and the table EMP by using the DROP command:: query("DROP FUNCTION clean_EMP()") query("DROP FUNCTION high_pay()") query("DROP FUNCTION new_emp()") query("DROP FUNCTION add_em(int4, int4)") query("DROP FUNCTION one()") query("DROP TABLE EMP CASCADE") pygresql-5.1.2/docs/contents/postgres/index.rst000066400000000000000000000006351365010227600216670ustar00rootroot00000000000000------------------- A PostgreSQL Primer ------------------- The examples in this chapter of the documentation have been taken from the PostgreSQL manual. They demonstrate some PostgreSQL features using the classic PyGreSQL interface. They can serve as an introduction to PostgreSQL, but not so much as examples for the use of PyGreSQL. Contents ======== .. toctree:: basic advanced func syscat pygresql-5.1.2/docs/contents/postgres/syscat.rst000066400000000000000000000110761365010227600220670ustar00rootroot00000000000000Examples for using the system catalogs ====================================== .. py:currentmodule:: pg The system catalogs are regular tables where PostgreSQL stores schema metadata, such as information about tables and columns, and internal bookkeeping information. You can drop and recreate the tables, add columns, insert and update values, and severely mess up your system that way. Normally, one should not change the system catalogs by hand: there are SQL commands to make all supported changes. For example, CREATE DATABASE inserts a row into the *pg_database* catalog — and actually creates the database on disk. In this section we want to show examples for how to parse some of the system catalogs, making queries with the classic PyGreSQL interface.
We assume that you have already created a connection to the PostgreSQL database, as explained in the :doc:`basic`:: >>> from pg import DB >>> db = DB() >>> query = db.query List indices ------------ This query lists all simple indices in the database:: print(query("""SELECT bc.relname AS class_name, ic.relname AS index_name, a.attname FROM pg_class bc, pg_class ic, pg_index i, pg_attribute a WHERE i.indrelid = bc.oid AND i.indexrelid = ic.oid AND i.indkey[0] = a.attnum AND a.attrelid = bc.oid AND NOT a.attisdropped AND a.attnum>0 ORDER BY class_name, index_name, attname""")) List user-defined attributes ---------------------------- This query lists all user-defined attributes and their types in user-defined tables:: print(query("""SELECT c.relname, a.attname, format_type(a.atttypid, a.atttypmod) FROM pg_class c, pg_attribute a WHERE c.relkind = 'r' AND c.relnamespace!=ALL(ARRAY[ 'pg_catalog','pg_toast', 'information_schema']::regnamespace[]) AND a.attnum > 0 AND a.attrelid = c.oid AND NOT a.attisdropped ORDER BY relname, attname""")) List user-defined base types ---------------------------- This query lists all user-defined base types:: print(query("""SELECT r.rolname, t.typname FROM pg_type t, pg_authid r WHERE r.oid = t.typowner AND t.typrelid = '0'::oid and t.typelem = '0'::oid AND r.rolname != 'postgres' ORDER BY rolname, typname""")) List operators -------------- This query lists all right-unary operators:: print(query("""SELECT o.oprname AS right_unary, lt.typname AS operand, result.typname AS return_type FROM pg_operator o, pg_type lt, pg_type result WHERE o.oprkind='r' and o.oprleft = lt.oid AND o.oprresult = result.oid ORDER BY operand""")) This query lists all left-unary operators:: print(query("""SELECT o.oprname AS left_unary, rt.typname AS operand, result.typname AS return_type FROM pg_operator o, pg_type rt, pg_type result WHERE o.oprkind='l' AND o.oprright = rt.oid AND o.oprresult = result.oid ORDER BY operand""")) And this one lists all of the binary operators:: print(query("""SELECT o.oprname AS binary_op, rt.typname AS right_opr, lt.typname AS left_opr, result.typname AS return_type FROM pg_operator o, pg_type rt, pg_type lt, pg_type result WHERE o.oprkind = 'b' AND o.oprright = rt.oid AND o.oprleft = lt.oid AND o.oprresult = result.oid""")) List functions of a language ---------------------------- Given a programming language, this query returns the name, args and return type from all functions of a language:: language = 'sql' print(query("""SELECT p.proname, p.pronargs, t.typname FROM pg_proc p, pg_language l, pg_type t WHERE p.prolang = l.oid AND p.prorettype = t.oid AND l.lanname = $1 ORDER BY proname""", (language,))) List aggregate functions ------------------------ This query lists all of the aggregate functions and the type to which they can be applied:: print(query("""SELECT p.proname, t.typname FROM pg_aggregate a, pg_proc p, pg_type t WHERE a.aggfnoid = p.oid and p.proargtypes[0] = t.oid ORDER BY proname, typname""")) List operator families ---------------------- The following query lists all defined operator families and all the operators included in each family:: print(query("""SELECT am.amname, opf.opfname, amop.amopopr::regoperator FROM pg_am am, pg_opfamily opf, pg_amop amop WHERE opf.opfmethod = am.oid AND amop.amopfamily = opf.oid ORDER BY amname, opfname, amopopr""")) pygresql-5.1.2/docs/contents/tutorial.rst000066400000000000000000000225541365010227600205570ustar00rootroot00000000000000First Steps with PyGreSQL ========================= In this small
tutorial we show you the basic operations you can perform with both flavors of the PyGreSQL interface. Please choose your flavor: .. contents:: :local: First Steps with the classic PyGreSQL Interface ----------------------------------------------- .. py:currentmodule:: pg Before doing anything else, it's necessary to create a database connection. To do this, simply import the :class:`DB` wrapper class and create an instance of it, passing the necessary connection parameters, like this:: >>> from pg import DB >>> db = DB(dbname='testdb', host='pgserver', port=5432, ... user='scott', passwd='tiger') You can omit one or even all parameters if you want to use their default values. PostgreSQL will use the name of the current operating system user as the login and the database name, and will try to connect to the local host on port 5432 if nothing else is specified. The `db` object has all methods of the lower-level :class:`Connection` class plus some more convenience methods provided by the :class:`DB` wrapper. You can now execute database queries using the :meth:`DB.query` method:: >>> db.query("create table fruits(id serial primary key, name varchar)") You can list all database tables with the :meth:`DB.get_tables` method:: >>> db.get_tables() ['public.fruits'] To get the attributes of the *fruits* table, use :meth:`DB.get_attnames`:: >>> db.get_attnames('fruits') {'id': 'int', 'name': 'text'} Verify that you can insert into the newly created *fruits* table: >>> db.has_table_privilege('fruits', 'insert') True You can insert a new row into the table using the :meth:`DB.insert` method, for example:: >>> db.insert('fruits', name='apple') {'name': 'apple', 'id': 1} Note how this method returns the full row as a dictionary including its *id* column that has been generated automatically by a database sequence. You can also pass a dictionary to the :meth:`DB.insert` method instead of or in addition to using keyword arguments. Let's add another row to the table: >>> banana = db.insert('fruits', name='banana') Or, you can add a whole bunch of fruits at the same time using the :meth:`Connection.inserttable` method. Note that this method uses the COPY command of PostgreSQL to insert all data in one batch operation, which is much faster than sending many individual INSERT commands:: >>> more_fruits = 'cherimaya durian eggfruit fig grapefruit'.split() >>> data = list(enumerate(more_fruits, start=3)) >>> db.inserttable('fruits', data) We can now query the database for all rows that have been inserted into the *fruits* table:: >>> print(db.query('select * from fruits')) id| name --+---------- 1|apple 2|banana 3|cherimaya 4|durian 5|eggfruit 6|fig 7|grapefruit (7 rows) Instead of simply printing the :class:`Query` instance that has been returned by this query, we can also request the data as list of tuples:: >>> q = db.query('select * from fruits') >>> q.getresult() ... [(1, 'apple'), ..., (7, 'grapefruit')] Instead of a list of tuples, we can also request a list of dicts:: >>> q.dictresult() [{'id': 1, 'name': 'apple'}, ..., {'id': 7, 'name': 'grapefruit'}] You can also return the rows as named tuples:: >>> rows = q.namedresult() >>> rows[3].name 'durian' In PyGreSQL 5.1 and newer, you can also use the :class:`Query` instance directly as an iterable that yields the rows as tuples, and there are also methods that return iterables for rows as dictionaries, named tuples or scalar values. Other methods like :meth:`Query.one` or :meth:`Query.onescalar` return only one row or only the first field of that row. 
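For instance (a brief sketch, reusing the fruits table from above)::

    >>> q = db.query('select name from fruits')
    >>> [name for name, in q]  # the Query itself is iterable
    ['apple', 'banana', 'cherimaya', 'durian', 'eggfruit', 'fig', 'grapefruit']
    >>> db.query('select name from fruits').onescalar()  # first field of first row
    'apple'
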
You can get the number of rows with the :func:`len` function. Using the method :meth:`DB.get_as_dict`, you can easily import the whole table into a Python dictionary mapping the primary key *id* to the *name*:: >>> db.get_as_dict('fruits', scalar=True) OrderedDict([(1, 'apple'), (2, 'banana'), (3, 'cherimaya'), (4, 'durian'), (5, 'eggfruit'), (6, 'fig'), (7, 'grapefruit')]) To change a single row in the database, you can use the :meth:`DB.update` method. For instance, if you want to capitalize the name 'banana':: >>> db.update('fruits', banana, name=banana['name'].capitalize()) {'id': 2, 'name': 'Banana'} >>> print(db.query('select * from fruits where id between 1 and 3')) id| name --+--------- 1|apple 2|Banana 3|cherimaya (3 rows) Let's also capitalize the other names in the database:: >>> db.query('update fruits set name=initcap(name)') '7' The returned string `'7'` tells us the number of updated rows. It is returned as a string to distinguish it from an OID, which will be returned as an integer if a new row has been inserted into a table with an OID column. To delete a single row from the database, use the :meth:`DB.delete` method:: >>> db.delete('fruits', banana) 1 The returned integer value `1` tells us that one row has been deleted. If we try it again, the method returns the integer value `0`. Naturally, this method can only return 0 or 1:: >>> db.delete('fruits', banana) 0 Of course, we can insert the row back again:: >>> db.insert('fruits', banana) {'id': 2, 'name': 'Banana'} If we want to change a different row, we can get its current state with:: >>> apple = db.get('fruits', 1) >>> apple {'name': 'Apple', 'id': 1} We can duplicate the row like this:: >>> db.insert('fruits', apple, id=8) {'id': 8, 'name': 'Apple'} To remove the duplicated row, we can do:: >>> db.delete('fruits', id=8) 1 Finally, to remove the table from the database and close the connection:: >>> db.query("drop table fruits") >>> db.close() For more advanced features and details, see the reference: :doc:`pg/index` First Steps with the DB-API 2.0 Interface ----------------------------------------- .. py:currentmodule:: pgdb As with the classic interface, the first thing you need to do is to create a database connection. To do this, use the function :func:`pgdb.connect` in the :mod:`pgdb` module, passing the connection parameters:: >>> from pgdb import connect >>> con = connect(database='testdb', host='pgserver:5432', ... user='scott', password='tiger') As in the classic interface, you can omit parameters if they are the default values used by PostgreSQL. To do anything with the connection, you need to request a cursor object from it, which can be thought of as the Python representation of a database cursor. The connection has a method that lets you get a cursor:: >>> cursor = con.cursor() The cursor has a method that lets you execute database queries:: >>> cursor.execute("create table fruits(" ...
"id serial primary key, name varchar)") You can also use this method to insert data into the table:: >>> cursor.execute("insert into fruits (name) values ('apple')") You can pass parameters in a safe way:: >>> cursor.execute("insert into fruits (name) values (%s)", ('banana',)) To insert multiple rows at once, you can use the following method:: >>> more_fruits = 'cherimaya durian eggfruit fig grapefruit'.split() >>> parameters = [(name,) for name in more_fruits] >>> cursor.executemany("insert into fruits (name) values (%s)", parameters) The cursor also has a :meth:`Cursor.copy_from` method to quickly insert large amounts of data into the database, and a :meth:`Cursor.copy_to` method to quickly dump large amounts of data from the database, using the PostgreSQL COPY command. Note, however, that these methods are an extension provided by PyGreSQL; they are not part of the DB-API 2 standard. Also note that the DB-API 2.0 interface does not have an autocommit mode as you may be used to from PostgreSQL. So in order to make these inserts permanent, you need to commit them to the database:: >>> con.commit() If you end the program without calling the commit method of the connection, or if you call the rollback method of the connection, then the changes will be discarded. In a similar way, you can update or delete rows in the database, executing UPDATE or DELETE statements instead of INSERT statements. To fetch rows from the database, execute a SELECT statement first. Then you can use one of several fetch methods to retrieve the results. For instance, to request a single row:: >>> cursor.execute('select * from fruits where id=1') >>> cursor.fetchone() Row(id=1, name='apple') The result is a named tuple. This means you can access its elements either using an index number as for an ordinary tuple, or using the column name as for access to object attributes. To fetch all rows of the query, use this method instead:: >>> cursor.execute('select * from fruits') >>> cursor.fetchall() [Row(id=1, name='apple'), ..., Row(id=7, name='grapefruit')] The output is a list of named tuples. If you want to fetch only a limited number of rows from the query:: >>> cursor.execute('select * from fruits') >>> cursor.fetchmany(2) [Row(id=1, name='apple'), Row(id=2, name='banana')] Finally, to remove the table from the database and close the connection:: >>> cursor.execute("drop table fruits") >>> cursor.close() >>> con.close() For more advanced features and details, see the reference: :doc:`pgdb/index`
pygresql-5.1.2/docs/copyright.rst
Copyright notice ================ Written by D'Arcy J.M. Cain (darcy@druid.net) Based heavily on code written by Pascal Andre (andre@chimay.via.ecp.fr) Copyright (c) 1995, Pascal Andre Further modifications copyright (c) 1997-2008 by D'Arcy J.M. Cain (darcy@PyGreSQL.org) Further modifications copyright (c) 2009-2020 by the PyGreSQL team. Permission to use, copy, modify, and distribute this software and its documentation for any purpose, without fee, and without a written agreement is hereby granted, provided that the above copyright notice and this paragraph and the following two paragraphs appear in all copies. In this license the term "AUTHORS" refers to anyone who has contributed code to PyGreSQL.
IN NO EVENT SHALL THE AUTHORS BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF AUTHORS HAVE BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. THE AUTHORS SPECIFICALLY DISCLAIM ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS, AND THE AUTHORS HAVE NO OBLIGATIONS TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
pygresql-5.1.2/docs/download/
pygresql-5.1.2/docs/download/download.rst
Current PyGreSQL versions ------------------------- You can download PyGreSQL from the **Python Package Index** at * https://pypi.org/project/PyGreSQL/#files **Linux RPM** packages can be found attached to the GitHub release at * https://github.com/PyGreSQL/PyGreSQL/releases/ **CentOS** packages can be found on the pkgs.org site * https://pkgs.org/search/?q=pygresql **Debian** packages can be found at * https://packages.debian.org/search?suite=all&searchon=names&keywords=pygresql **FreeBSD** packages are available in their ports collection * http://www.freebsd.org/cgi/cvsweb.cgi/ports/databases/py-PyGreSQL/ **NetBSD** packages are available in their pkgsrc collection * https://pkgsrc.se/databases/py-postgresql **openSUSE** packages are available through their build service at * https://software.opensuse.org/package/PyGreSQL?search_term=pygresql **Ubuntu** packages are available from Launchpad at * https://launchpad.net/ubuntu/+source/pygresql **Windows binaries** (executables and wheels) are available at * https://pypi.org/project/PyGreSQL/#files **Windows MSI** packages are attached to the GitHub release at * https://github.com/PyGreSQL/PyGreSQL/releases/ Older PyGreSQL versions ----------------------- You can look for older PyGreSQL versions at * https://pypi.org/project/PyGreSQL/#history
pygresql-5.1.2/docs/download/files.rst
Distribution files ------------------ ============== = pgmodule.c the main source file for the C extension module (_pg) pgconn.c the connection object pginternal.c internal functions pglarge.c large object support pgnotice.c the notice object pgquery.c the query object pgsource.c the source object pgtypes.h PostgreSQL type definitions py3c.h Python 2/3 compatibility layer for the C extension pg.py the "classic" PyGreSQL module pgdb.py a DB-SIG DB-API 2.0 compliant API wrapper for PyGreSQL setup.py the Python setup script To install PyGreSQL, you can run "python setup.py install". setup.cfg the Python setup configuration docs/ documentation directory The documentation has been created with Sphinx. All text files are in ReST format; an HTML version of the documentation can be created with "make html". tests/ a suite of unit tests for PyGreSQL ============== =
pygresql-5.1.2/docs/download/index.rst
Download information ==================== .. include:: download.rst News, Changes and Future Development ------------------------------------ See the :doc:`../announce` for current news.
For a list of all changes in the current version |version| and in past versions, have a look at the :doc:`../contents/changelog`. The section on :doc:`../community/index` lists ideas for future developments and ways to participate. Installation ------------ Please read the chapter on :doc:`../contents/install` in our documentation. .. include:: files.rst .. include:: ../community/homes.rstpygresql-5.1.2/docs/make.bat000066400000000000000000000161251365010227600157310ustar00rootroot00000000000000@ECHO OFF REM Command file for Sphinx documentation if "%SPHINXBUILD%" == "" ( set SPHINXBUILD=sphinx-build ) set BUILDDIR=_build set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% . set I18NSPHINXOPTS=%SPHINXOPTS% . if NOT "%PAPER%" == "" ( set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS% ) if "%1" == "" goto help if "%1" == "help" ( :help echo.Please use `make ^` where ^ is one of echo. html to make standalone HTML files echo. dirhtml to make HTML files named index.html in directories echo. singlehtml to make a single large HTML file echo. pickle to make pickle files echo. json to make JSON files echo. htmlhelp to make HTML files and a HTML help project echo. qthelp to make HTML files and a qthelp project echo. devhelp to make HTML files and a Devhelp project echo. epub to make an epub echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter echo. text to make text files echo. man to make manual pages echo. texinfo to make Texinfo files echo. gettext to make PO message catalogs echo. changes to make an overview over all changed/added/deprecated items echo. xml to make Docutils-native XML files echo. pseudoxml to make pseudoxml-XML files for display purposes echo. linkcheck to check all external links for integrity echo. doctest to run all doctests embedded in the documentation if enabled echo. coverage to run coverage check of the documentation if enabled goto end ) if "%1" == "clean" ( for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i del /q /s %BUILDDIR%\* goto end ) REM Check if sphinx-build is available and fallback to Python version if any %SPHINXBUILD% 1>NUL 2>NUL if errorlevel 9009 goto sphinx_python goto sphinx_ok :sphinx_python set SPHINXBUILD=python -m sphinx.__init__ %SPHINXBUILD% 2> nul if errorlevel 9009 ( echo. echo.The 'sphinx-build' command was not found. Make sure you have Sphinx echo.installed, then set the SPHINXBUILD environment variable to point echo.to the full path of the 'sphinx-build' executable. Alternatively you echo.may add the Sphinx directory to PATH. echo. echo.If you don't have Sphinx installed, grab it from echo.http://sphinx-doc.org/ exit /b 1 ) :sphinx_ok if "%1" == "html" ( %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html if errorlevel 1 exit /b 1 echo. echo.Build finished. The HTML pages are in %BUILDDIR%/html. goto end ) if "%1" == "dirhtml" ( %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml if errorlevel 1 exit /b 1 echo. echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml. goto end ) if "%1" == "singlehtml" ( %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml if errorlevel 1 exit /b 1 echo. echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml. goto end ) if "%1" == "pickle" ( %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can process the pickle files. 
goto end ) if "%1" == "json" ( %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can process the JSON files. goto end ) if "%1" == "htmlhelp" ( %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can run HTML Help Workshop with the ^ .hhp project file in %BUILDDIR%/htmlhelp. goto end ) if "%1" == "qthelp" ( %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can run "qcollectiongenerator" with the ^ .qhcp project file in %BUILDDIR%/qthelp, like this: echo.^> qcollectiongenerator %BUILDDIR%\qthelp\PyGreSQL.qhcp echo.To view the help file: echo.^> assistant -collectionFile %BUILDDIR%\qthelp\PyGreSQL.ghc goto end ) if "%1" == "devhelp" ( %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp if errorlevel 1 exit /b 1 echo. echo.Build finished. goto end ) if "%1" == "epub" ( %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub if errorlevel 1 exit /b 1 echo. echo.Build finished. The epub file is in %BUILDDIR%/epub. goto end ) if "%1" == "latex" ( %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex if errorlevel 1 exit /b 1 echo. echo.Build finished; the LaTeX files are in %BUILDDIR%/latex. goto end ) if "%1" == "latexpdf" ( %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex cd %BUILDDIR%/latex make all-pdf cd %~dp0 echo. echo.Build finished; the PDF files are in %BUILDDIR%/latex. goto end ) if "%1" == "latexpdfja" ( %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex cd %BUILDDIR%/latex make all-pdf-ja cd %~dp0 echo. echo.Build finished; the PDF files are in %BUILDDIR%/latex. goto end ) if "%1" == "text" ( %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text if errorlevel 1 exit /b 1 echo. echo.Build finished. The text files are in %BUILDDIR%/text. goto end ) if "%1" == "man" ( %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man if errorlevel 1 exit /b 1 echo. echo.Build finished. The manual pages are in %BUILDDIR%/man. goto end ) if "%1" == "texinfo" ( %SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo if errorlevel 1 exit /b 1 echo. echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo. goto end ) if "%1" == "gettext" ( %SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale if errorlevel 1 exit /b 1 echo. echo.Build finished. The message catalogs are in %BUILDDIR%/locale. goto end ) if "%1" == "changes" ( %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes if errorlevel 1 exit /b 1 echo. echo.The overview file is in %BUILDDIR%/changes. goto end ) if "%1" == "linkcheck" ( %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck if errorlevel 1 exit /b 1 echo. echo.Link check complete; look for any errors in the above output ^ or in %BUILDDIR%/linkcheck/output.txt. goto end ) if "%1" == "doctest" ( %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest if errorlevel 1 exit /b 1 echo. echo.Testing of doctests in the sources finished, look at the ^ results in %BUILDDIR%/doctest/output.txt. goto end ) if "%1" == "coverage" ( %SPHINXBUILD% -b coverage %ALLSPHINXOPTS% %BUILDDIR%/coverage if errorlevel 1 exit /b 1 echo. echo.Testing of coverage in the sources finished, look at the ^ results in %BUILDDIR%/coverage/python.txt. goto end ) if "%1" == "xml" ( %SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml if errorlevel 1 exit /b 1 echo. echo.Build finished. The XML files are in %BUILDDIR%/xml. 
goto end ) if "%1" == "pseudoxml" ( %SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml if errorlevel 1 exit /b 1 echo. echo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml. goto end ) :end pygresql-5.1.2/docs/requirements.txt000066400000000000000000000000241365010227600175770ustar00rootroot00000000000000cloud_sptheme>=1.7.1pygresql-5.1.2/docs/start.txt000066400000000000000000000003571365010227600162220ustar00rootroot00000000000000.. PyGreSQL index page without toc (for use with cloud theme) Welcome to PyGreSQL =================== .. toctree:: :hidden: copyright announce download/index contents/index community/index .. include:: about.txtpygresql-5.1.2/docs/toc.txt000066400000000000000000000003451365010227600156470ustar00rootroot00000000000000.. PyGreSQL index page with toc (for use without cloud theme) Welcome to PyGreSQL =================== .. toctree:: :maxdepth: 2 about copyright announce download/index contents/index community/indexpygresql-5.1.2/pg.py000066400000000000000000003074741365010227600143660ustar00rootroot00000000000000#!/usr/bin/python # # PyGreSQL - a Python interface for the PostgreSQL database. # # This file contains the classic pg module. # # Copyright (c) 2020 by the PyGreSQL Development Team # # The notification handler is based on pgnotify which is # Copyright (c) 2001 Ng Pheng Siong. All rights reserved. # # Please see the LICENSE.TXT file for specific restrictions. """PyGreSQL classic interface. This pg module implements some basic database management stuff. It includes the _pg module and builds on it, providing the higher level wrapper class named DB with additional functionality. This is known as the "classic" ("old style") PyGreSQL interface. For a DB-API 2 compliant interface use the newer pgdb module. 
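A minimal usage sketch (this assumes a reachable database named 'testdb' that the current user may access; all other connection parameters can be passed as well):

    from pg import DB
    db = DB(dbname='testdb')
    print(db.query("select 'Hello, world!'").onescalar())
    db.close()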
""" from __future__ import print_function, division try: from _pg import * except ImportError: import os import sys # see https://docs.python.org/3/whatsnew/3.8.html#ctypes if os.name == 'nt' and sys.version_info >= (3, 8): for path in os.environ["PATH"].split(os.pathsep): if os.path.exists(os.path.join(path, 'libpq.dll')): with os.add_dll_directory(os.path.abspath(path)): from _pg import * break else: raise else: raise __version__ = version __all__ = [ 'DB', 'Adapter', 'NotificationHandler', 'Typecasts', 'Bytea', 'Hstore', 'Json', 'Literal', 'Error', 'Warning', 'DataError', 'DatabaseError', 'IntegrityError', 'InterfaceError', 'InternalError', 'InvalidResultError', 'MultipleResultsError', 'NoResultError', 'NotSupportedError', 'OperationalError', 'ProgrammingError', 'INV_READ', 'INV_WRITE', 'SEEK_CUR', 'SEEK_END', 'SEEK_SET', 'TRANS_ACTIVE', 'TRANS_IDLE', 'TRANS_INERROR', 'TRANS_INTRANS', 'TRANS_UNKNOWN', 'cast_array', 'cast_hstore', 'cast_record', 'connect', 'escape_bytea', 'escape_string', 'unescape_bytea', 'get_array', 'get_bool', 'get_bytea_escaped', 'get_datestyle', 'get_decimal', 'get_decimal_point', 'get_defbase', 'get_defhost', 'get_defopt', 'get_defport', 'get_defuser', 'get_jsondecode', 'set_array', 'set_bool', 'set_bytea_escaped', 'set_datestyle', 'set_decimal', 'set_decimal_point', 'set_defbase', 'set_defhost', 'set_defopt', 'set_defpasswd', 'set_defport', 'set_defuser', 'set_jsondecode', 'set_query_helpers', 'version', '__version__'] import select import warnings import weakref from datetime import date, time, datetime, timedelta, tzinfo from decimal import Decimal from math import isnan, isinf from collections import namedtuple from keyword import iskeyword from operator import itemgetter from functools import partial from re import compile as regex from json import loads as jsondecode, dumps as jsonencode from uuid import UUID try: # noinspection PyUnresolvedReferences long except NameError: # Python >= 3.0 long = int try: # noinspection PyUnresolvedReferences basestring except NameError: # Python >= 3.0 basestring = (str, bytes) try: from functools import lru_cache except ImportError: # Python < 3.2 from functools import update_wrapper try: from _thread import RLock except ImportError: class RLock: # for builds without threads def __enter__(self): pass def __exit__(self, exctype, excinst, exctb): pass def lru_cache(maxsize=128): """Simplified functools.lru_cache decorator for one argument.""" def decorator(function): sentinel = object() cache = {} get = cache.get lock = RLock() root = [] root_full = [root, False] root[:] = [root, root, None, None] if maxsize == 0: def wrapper(arg): res = function(arg) return res elif maxsize is None: def wrapper(arg): res = get(arg, sentinel) if res is not sentinel: return res res = function(arg) cache[arg] = res return res else: def wrapper(arg): with lock: link = get(arg) if link is not None: root = root_full[0] prev, next, _arg, res = link prev[1] = next next[0] = prev last = root[0] last[1] = root[0] = link link[0] = last link[1] = root return res res = function(arg) with lock: root, full = root_full if arg in cache: pass elif full: oldroot = root oldroot[2] = arg oldroot[3] = res root = root_full[0] = oldroot[1] oldarg = root[2] oldres = root[3] # keep reference root[2] = root[3] = None del cache[oldarg] cache[arg] = oldroot else: last = root[0] link = [last, root, arg, res] last[1] = root[0] = cache[arg] = link if len(cache) >= maxsize: root_full[1] = True return res wrapper.__wrapped__ = function return update_wrapper(wrapper, 
function) return decorator # Auxiliary classes and functions that are independent from a DB connection: try: from collections import OrderedDict except ImportError: # Python 2.6 or 3.0 OrderedDict = dict class AttrDict(dict): """Simple read-only ordered dictionary for storing attribute names.""" def __init__(self, *args, **kw): if len(args) > 1 or kw: raise TypeError items = args[0] if args else [] if isinstance(items, dict): raise TypeError items = list(items) self._keys = [item[0] for item in items] dict.__init__(self, items) self._read_only = True error = self._read_only_error self.clear = self.update = error self.pop = self.setdefault = self.popitem = error def __setitem__(self, key, value): if self._read_only: self._read_only_error() dict.__setitem__(self, key, value) def __delitem__(self, key): if self._read_only: self._read_only_error() dict.__delitem__(self, key) def __iter__(self): return iter(self._keys) def keys(self): return list(self._keys) def values(self): return [self[key] for key in self] def items(self): return [(key, self[key]) for key in self] def iterkeys(self): return self.__iter__() def itervalues(self): return iter(self.values()) def iteritems(self): return iter(self.items()) @staticmethod def _read_only_error(*args, **kw): raise TypeError('This object is read-only') else: class AttrDict(OrderedDict): """Simple read-only ordered dictionary for storing attribute names.""" def __init__(self, *args, **kw): self._read_only = False OrderedDict.__init__(self, *args, **kw) self._read_only = True error = self._read_only_error self.clear = self.update = error self.pop = self.setdefault = self.popitem = error def __setitem__(self, key, value): if self._read_only: self._read_only_error() OrderedDict.__setitem__(self, key, value) def __delitem__(self, key): if self._read_only: self._read_only_error() OrderedDict.__delitem__(self, key) @staticmethod def _read_only_error(*args, **kw): raise TypeError('This object is read-only') try: from inspect import signature except ImportError: # Python < 3.3 from inspect import getargspec def get_args(func): return getargspec(func).args else: def get_args(func): return list(signature(func).parameters) try: from datetime import timezone except ImportError: # Python < 3.2 class timezone(tzinfo): """Simple timezone implementation.""" def __init__(self, offset, name=None): self.offset = offset if not name: minutes = self.offset.days * 1440 + self.offset.seconds // 60 if minutes < 0: hours, minutes = divmod(-minutes, 60) hours = -hours else: hours, minutes = divmod(minutes, 60) name = 'UTC%+03d:%02d' % (hours, minutes) self.name = name def utcoffset(self, dt): return self.offset def tzname(self, dt): return self.name def dst(self, dt): return None timezone.utc = timezone(timedelta(0), 'UTC') _has_timezone = False else: _has_timezone = True # time zones used in Postgres timestamptz output _timezones = dict(CET='+0100', EET='+0200', EST='-0500', GMT='+0000', HST='-1000', MET='+0100', MST='-0700', UCT='+0000', UTC='+0000', WET='+0000') def _timezone_as_offset(tz): if tz.startswith(('+', '-')): if len(tz) < 5: return tz + '00' return tz.replace(':', '') return _timezones.get(tz, '+0000') def _get_timezone(tz): tz = _timezone_as_offset(tz) minutes = 60 * int(tz[1:3]) + int(tz[3:5]) if tz[0] == '-': minutes = -minutes return timezone(timedelta(minutes=minutes), tz) def _oid_key(table): """Build oid key from a table name.""" return 'oid(%s)' % table class _SimpleTypes(dict): """Dictionary mapping pg_type names to simple type names.""" _types = {'bool': 
'bool', 'bytea': 'bytea', 'date': 'date interval time timetz timestamp timestamptz' ' abstime reltime', # these are very old 'float': 'float4 float8', 'int': 'cid int2 int4 int8 oid xid', 'hstore': 'hstore', 'json': 'json jsonb', 'uuid': 'uuid', 'num': 'numeric', 'money': 'money', 'text': 'bpchar char name text varchar'} def __init__(self): for typ, keys in self._types.items(): for key in keys.split(): self[key] = typ self['_%s' % key] = '%s[]' % typ # this could be a static method in Python > 2.6 def __missing__(self, key): return 'text' _simpletypes = _SimpleTypes() def _quote_if_unqualified(param, name): """Quote parameter representing a qualified name. Puts a quote_ident() call around the give parameter unless the name contains a dot, in which case the name is ambiguous (could be a qualified name or just a name with a dot in it) and must be quoted manually by the caller. """ if isinstance(name, basestring) and '.' not in name: return 'quote_ident(%s)' % (param,) return param class _ParameterList(list): """Helper class for building typed parameter lists.""" def add(self, value, typ=None): """Typecast value with known database type and build parameter list. If this is a literal value, it will be returned as is. Otherwise, a placeholder will be returned and the parameter list will be augmented. """ value = self.adapt(value, typ) if isinstance(value, Literal): return value self.append(value) return '$%d' % len(self) class Bytea(bytes): """Wrapper class for marking Bytea values.""" class Hstore(dict): """Wrapper class for marking hstore values.""" _re_quote = regex('^[Nn][Uu][Ll][Ll]$|[ ,=>]') @classmethod def _quote(cls, s): if s is None: return 'NULL' if not s: return '""' s = s.replace('"', '\\"') if cls._re_quote.search(s): s = '"%s"' % s return s def __str__(self): q = self._quote return ','.join('%s=>%s' % (q(k), q(v)) for k, v in self.items()) class Json: """Wrapper class for marking Json values.""" def __init__(self, obj): self.obj = obj class Literal(str): """Wrapper class for marking literal SQL values.""" class Adapter: """Class providing methods for adapting parameters to the database.""" _bool_true_values = frozenset('t true 1 y yes on'.split()) _date_literals = frozenset('current_date current_time' ' current_timestamp localtime localtimestamp'.split()) _re_array_quote = regex(r'[{},"\\\s]|^[Nn][Uu][Ll][Ll]$') _re_record_quote = regex(r'[(,"\\]') _re_array_escape = _re_record_escape = regex(r'(["\\])') def __init__(self, db): self.db = weakref.proxy(db) @classmethod def _adapt_bool(cls, v): """Adapt a boolean parameter.""" if isinstance(v, basestring): if not v: return None v = v.lower() in cls._bool_true_values return 't' if v else 'f' @classmethod def _adapt_date(cls, v): """Adapt a date parameter.""" if not v: return None if isinstance(v, basestring) and v.lower() in cls._date_literals: return Literal(v) return v @staticmethod def _adapt_num(v): """Adapt a numeric parameter.""" if not v and v != 0: return None return v _adapt_int = _adapt_float = _adapt_money = _adapt_num def _adapt_bytea(self, v): """Adapt a bytea parameter.""" return self.db.escape_bytea(v) def _adapt_json(self, v): """Adapt a json parameter.""" if not v: return None if isinstance(v, basestring): return v return self.db.encode_json(v) @classmethod def _adapt_text_array(cls, v): """Adapt a text type array parameter.""" if isinstance(v, list): adapt = cls._adapt_text_array return '{%s}' % ','.join(adapt(v) for v in v) if v is None: return 'null' if not v: return '""' v = str(v) if 
cls._re_array_quote.search(v): v = '"%s"' % cls._re_array_escape.sub(r'\\\1', v) return v _adapt_date_array = _adapt_text_array @classmethod def _adapt_bool_array(cls, v): """Adapt a boolean array parameter.""" if isinstance(v, list): adapt = cls._adapt_bool_array return '{%s}' % ','.join(adapt(v) for v in v) if v is None: return 'null' if isinstance(v, basestring): if not v: return 'null' v = v.lower() in cls._bool_true_values return 't' if v else 'f' @classmethod def _adapt_num_array(cls, v): """Adapt a numeric array parameter.""" if isinstance(v, list): adapt = cls._adapt_num_array return '{%s}' % ','.join(adapt(v) for v in v) if not v and v != 0: return 'null' return str(v) _adapt_int_array = _adapt_float_array = _adapt_money_array = \ _adapt_num_array def _adapt_bytea_array(self, v): """Adapt a bytea array parameter.""" if isinstance(v, list): return b'{' + b','.join( self._adapt_bytea_array(v) for v in v) + b'}' if v is None: return b'null' return self.db.escape_bytea(v).replace(b'\\', b'\\\\') def _adapt_json_array(self, v): """Adapt a json array parameter.""" if isinstance(v, list): adapt = self._adapt_json_array return '{%s}' % ','.join(adapt(v) for v in v) if not v: return 'null' if not isinstance(v, basestring): v = self.db.encode_json(v) if self._re_array_quote.search(v): v = '"%s"' % self._re_array_escape.sub(r'\\\1', v) return v def _adapt_record(self, v, typ): """Adapt a record parameter with given type.""" typ = self.get_attnames(typ).values() if len(typ) != len(v): raise TypeError('Record parameter %s has wrong size' % v) adapt = self.adapt value = [] for v, t in zip(v, typ): v = adapt(v, t) if v is None: v = '' elif not v: v = '""' else: if isinstance(v, bytes): if str is not bytes: v = v.decode('ascii') else: v = str(v) if self._re_record_quote.search(v): v = '"%s"' % self._re_record_escape.sub(r'\\\1', v) value.append(v) return '(%s)' % ','.join(value) def adapt(self, value, typ=None): """Adapt a value with known database type.""" if value is not None and not isinstance(value, Literal): if typ: simple = self.get_simple_name(typ) else: typ = simple = self.guess_simple_type(value) or 'text' pg_str = getattr(value, '__pg_str__', None) if pg_str: value = pg_str(typ) if simple == 'text': pass elif simple == 'record': if isinstance(value, tuple): value = self._adapt_record(value, typ) elif simple.endswith('[]'): if isinstance(value, list): adapt = getattr(self, '_adapt_%s_array' % simple[:-2]) value = adapt(value) else: adapt = getattr(self, '_adapt_%s' % simple) value = adapt(value) return value @staticmethod def simple_type(name): """Create a simple database type with given attribute names.""" typ = DbType(name) typ.simple = name return typ @staticmethod def get_simple_name(typ): """Get the simple name of a database type.""" if isinstance(typ, DbType): return typ.simple return _simpletypes[typ] @staticmethod def get_attnames(typ): """Get the attribute names of a composite database type.""" if isinstance(typ, DbType): return typ.attnames return {} _frequent_simple_types = { Bytea: 'bytea', str: 'text', bytes: 'text', bool: 'bool', int: 'int', long: 'int', float: 'float', Decimal: 'num', date: 'date', time: 'date', datetime: 'date', timedelta: 'date' } @classmethod def guess_simple_type(cls, value): """Try to guess which database type the given value has.""" # optimize for most frequent types try: return cls._frequent_simple_types[type(value)] except KeyError: pass if isinstance(value, Bytea): return 'bytea' if isinstance(value, basestring): return 'text' if isinstance(value, 
bool): return 'bool' if isinstance(value, (int, long)): return 'int' if isinstance(value, float): return 'float' if isinstance(value, Decimal): return 'num' if isinstance(value, (date, time, datetime, timedelta)): return 'date' if isinstance(value, list): return '%s[]' % (cls.guess_simple_base_type(value) or 'text',) if isinstance(value, tuple): simple_type = cls.simple_type guess = cls.guess_simple_type def get_attnames(self): return AttrDict((str(n + 1), simple_type(guess(v))) for n, v in enumerate(value)) typ = simple_type('record') typ._get_attnames = get_attnames return typ @classmethod def guess_simple_base_type(cls, value): """Try to guess the base type of a given array.""" for v in value: if isinstance(v, list): typ = cls.guess_simple_base_type(v) else: typ = cls.guess_simple_type(v) if typ: return typ def adapt_inline(self, value, nested=False): """Adapt a value that is put into the SQL and needs to be quoted.""" if value is None: return 'NULL' if isinstance(value, Literal): return value if isinstance(value, Bytea): value = self.db.escape_bytea(value) if bytes is not str: # Python >= 3.0 value = value.decode('ascii') elif isinstance(value, Json): if value.encode: return value.encode() value = self.db.encode_json(value) elif isinstance(value, (datetime, date, time, timedelta)): value = str(value) if isinstance(value, basestring): value = self.db.escape_string(value) return "'%s'" % value if isinstance(value, bool): return 'true' if value else 'false' if isinstance(value, float): if isinf(value): return "'-Infinity'" if value < 0 else "'Infinity'" if isnan(value): return "'NaN'" return value if isinstance(value, (int, long, Decimal)): return value if isinstance(value, list): q = self.adapt_inline s = '[%s]' if nested else 'ARRAY[%s]' return s % ','.join(str(q(v, nested=True)) for v in value) if isinstance(value, tuple): q = self.adapt_inline return '(%s)' % ','.join(str(q(v)) for v in value) pg_repr = getattr(value, '__pg_repr__', None) if not pg_repr: raise InterfaceError( 'Do not know how to adapt type %s' % type(value)) value = pg_repr() if isinstance(value, (tuple, list)): value = self.adapt_inline(value) return value def parameter_list(self): """Return a parameter list for parameters with known database types. The list has an add(value, typ) method that will build up the list and return either the literal value or a placeholder. 
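        A small sketch of the intended use (the adapter instance is
        available on a DB connection as db.adapter):

            params = db.adapter.parameter_list()
            where = 'id = %s' % params.add(42, 'int')
            # where is now 'id = $1' and list(params) == [42]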
""" params = _ParameterList() params.adapt = self.adapt return params def format_query(self, command, values=None, types=None, inline=False): """Format a database query using the given values and types.""" if not values: return command, [] if inline and types: raise ValueError('Typed parameters must be sent separately') params = self.parameter_list() if isinstance(values, (list, tuple)): if inline: adapt = self.adapt_inline literals = [adapt(value) for value in values] else: add = params.add if types: if (not isinstance(types, (list, tuple)) or len(types) != len(values)): raise TypeError('The values and types do not match') literals = [add(value, typ) for value, typ in zip(values, types)] else: literals = [add(value) for value in values] command %= tuple(literals) elif isinstance(values, dict): # we want to allow extra keys in the dictionary, # so we first must find the values actually used in the command used_values = {} literals = dict.fromkeys(values, '') for key in values: del literals[key] try: command % literals except KeyError: used_values[key] = values[key] literals[key] = '' values = used_values if inline: adapt = self.adapt_inline literals = dict((key, adapt(value)) for key, value in values.items()) else: add = params.add if types: if not isinstance(types, dict): raise TypeError('The values and types do not match') literals = dict((key, add(values[key], types.get(key))) for key in sorted(values)) else: literals = dict((key, add(values[key])) for key in sorted(values)) command %= literals else: raise TypeError('The values must be passed as tuple, list or dict') return command, params def cast_bool(value): """Cast a boolean value.""" if not get_bool(): return value return value[0] == 't' def cast_json(value): """Cast a JSON value.""" cast = get_jsondecode() if not cast: return value return cast(value) def cast_num(value): """Cast a numeric value.""" return (get_decimal() or float)(value) def cast_money(value): """Cast a money value.""" point = get_decimal_point() if not point: return value if point != '.': value = value.replace(point, '.') value = value.replace('(', '-') value = ''.join(c for c in value if c.isdigit() or c in '.-') return (get_decimal() or float)(value) def cast_int2vector(value): """Cast an int2vector value.""" return [int(v) for v in value.split()] def cast_date(value, connection): """Cast a date value.""" # The output format depends on the server setting DateStyle. The default # setting ISO and the setting for German are actually unambiguous. The # order of days and months in the other two settings is however ambiguous, # so at least here we need to consult the setting to properly parse values. 
if value == '-infinity': return date.min if value == 'infinity': return date.max value = value.split() if value[-1] == 'BC': return date.min value = value[0] if len(value) > 10: return date.max fmt = connection.date_format() return datetime.strptime(value, fmt).date() def cast_time(value): """Cast a time value.""" fmt = '%H:%M:%S.%f' if len(value) > 8 else '%H:%M:%S' return datetime.strptime(value, fmt).time() _re_timezone = regex('(.*)([+-].*)') def cast_timetz(value): """Cast a timetz value.""" tz = _re_timezone.match(value) if tz: value, tz = tz.groups() else: tz = '+0000' fmt = '%H:%M:%S.%f' if len(value) > 8 else '%H:%M:%S' if _has_timezone: value += _timezone_as_offset(tz) fmt += '%z' return datetime.strptime(value, fmt).timetz() return datetime.strptime(value, fmt).timetz().replace( tzinfo=_get_timezone(tz)) def cast_timestamp(value, connection): """Cast a timestamp value.""" if value == '-infinity': return datetime.min if value == 'infinity': return datetime.max value = value.split() if value[-1] == 'BC': return datetime.min fmt = connection.date_format() if fmt.endswith('-%Y') and len(value) > 2: value = value[1:5] if len(value[3]) > 4: return datetime.max fmt = ['%d %b' if fmt.startswith('%d') else '%b %d', '%H:%M:%S.%f' if len(value[2]) > 8 else '%H:%M:%S', '%Y'] else: if len(value[0]) > 10: return datetime.max fmt = [fmt, '%H:%M:%S.%f' if len(value[1]) > 8 else '%H:%M:%S'] return datetime.strptime(' '.join(value), ' '.join(fmt)) def cast_timestamptz(value, connection): """Cast a timestamptz value.""" if value == '-infinity': return datetime.min if value == 'infinity': return datetime.max value = value.split() if value[-1] == 'BC': return datetime.min fmt = connection.date_format() if fmt.endswith('-%Y') and len(value) > 2: value = value[1:] if len(value[3]) > 4: return datetime.max fmt = ['%d %b' if fmt.startswith('%d') else '%b %d', '%H:%M:%S.%f' if len(value[2]) > 8 else '%H:%M:%S', '%Y'] value, tz = value[:-1], value[-1] else: if fmt.startswith('%Y-'): tz = _re_timezone.match(value[1]) if tz: value[1], tz = tz.groups() else: tz = '+0000' else: value, tz = value[:-1], value[-1] if len(value[0]) > 10: return datetime.max fmt = [fmt, '%H:%M:%S.%f' if len(value[1]) > 8 else '%H:%M:%S'] if _has_timezone: value.append(_timezone_as_offset(tz)) fmt.append('%z') return datetime.strptime(' '.join(value), ' '.join(fmt)) return datetime.strptime(' '.join(value), ' '.join(fmt)).replace( tzinfo=_get_timezone(tz)) _re_interval_sql_standard = regex( '(?:([+-])?([0-9]+)-([0-9]+) ?)?' '(?:([+-]?[0-9]+)(?!:) ?)?' '(?:([+-])?([0-9]+):([0-9]+):([0-9]+)(?:\\.([0-9]+))?)?') _re_interval_postgres = regex( '(?:([+-]?[0-9]+) ?years? ?)?' '(?:([+-]?[0-9]+) ?mons? ?)?' '(?:([+-]?[0-9]+) ?days? ?)?' '(?:([+-])?([0-9]+):([0-9]+):([0-9]+)(?:\\.([0-9]+))?)?') _re_interval_postgres_verbose = regex( '@ ?(?:([+-]?[0-9]+) ?years? ?)?' '(?:([+-]?[0-9]+) ?mons? ?)?' '(?:([+-]?[0-9]+) ?days? ?)?' '(?:([+-]?[0-9]+) ?hours? ?)?' '(?:([+-]?[0-9]+) ?mins? ?)?' '(?:([+-])?([0-9]+)(?:\\.([0-9]+))? ?secs?)? ?(ago)?') _re_interval_iso_8601 = regex( 'P(?:([+-]?[0-9]+)Y)?' '(?:([+-]?[0-9]+)M)?' '(?:([+-]?[0-9]+)D)?' '(?:T(?:([+-]?[0-9]+)H)?' '(?:([+-]?[0-9]+)M)?' '(?:([+-])?([0-9]+)(?:\\.([0-9]+))?S)?)?') def cast_interval(value): """Cast an interval value.""" # The output format depends on the server setting IntervalStyle, but it's # not necessary to consult this setting to parse it. It's faster to just # check all possible formats, and there is no ambiguity here. 
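    # For example, the same interval may arrive as '+1-2 +3 +4:05:06'
    # (sql_standard), '1 year 2 mons 3 days 04:05:06' (postgres),
    # '@ 1 year 2 mons 3 days 4 hours 5 mins 6 secs' (postgres_verbose)
    # or 'P1Y2M3DT4H5M6S' (iso_8601); all of these are tried in turn below.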
m = _re_interval_iso_8601.match(value) if m: m = [d or '0' for d in m.groups()] secs_ago = m.pop(5) == '-' m = [int(d) for d in m] years, mons, days, hours, mins, secs, usecs = m if secs_ago: secs = -secs usecs = -usecs else: m = _re_interval_postgres_verbose.match(value) if m: m, ago = [d or '0' for d in m.groups()[:8]], m.group(9) secs_ago = m.pop(5) == '-' m = [-int(d) for d in m] if ago else [int(d) for d in m] years, mons, days, hours, mins, secs, usecs = m if secs_ago: secs = - secs usecs = -usecs else: m = _re_interval_postgres.match(value) if m and any(m.groups()): m = [d or '0' for d in m.groups()] hours_ago = m.pop(3) == '-' m = [int(d) for d in m] years, mons, days, hours, mins, secs, usecs = m if hours_ago: hours = -hours mins = -mins secs = -secs usecs = -usecs else: m = _re_interval_sql_standard.match(value) if m and any(m.groups()): m = [d or '0' for d in m.groups()] years_ago = m.pop(0) == '-' hours_ago = m.pop(3) == '-' m = [int(d) for d in m] years, mons, days, hours, mins, secs, usecs = m if years_ago: years = -years mons = -mons if hours_ago: hours = -hours mins = -mins secs = -secs usecs = -usecs else: raise ValueError('Cannot parse interval: %s' % value) days += 365 * years + 30 * mons return timedelta(days=days, hours=hours, minutes=mins, seconds=secs, microseconds=usecs) class Typecasts(dict): """Dictionary mapping database types to typecast functions. The cast functions get passed the string representation of a value in the database which they need to convert to a Python object. The passed string will never be None since NULL values are already handled before the cast function is called. Note that the basic types are already handled by the C extension. They only need to be handled here as record or array components. """ # the default cast functions # (str functions are ignored but have been added for faster access) defaults = {'char': str, 'bpchar': str, 'name': str, 'text': str, 'varchar': str, 'bool': cast_bool, 'bytea': unescape_bytea, 'int2': int, 'int4': int, 'serial': int, 'int8': long, 'oid': int, 'hstore': cast_hstore, 'json': cast_json, 'jsonb': cast_json, 'float4': float, 'float8': float, 'numeric': cast_num, 'money': cast_money, 'date': cast_date, 'interval': cast_interval, 'time': cast_time, 'timetz': cast_timetz, 'timestamp': cast_timestamp, 'timestamptz': cast_timestamptz, 'int2vector': cast_int2vector, 'uuid': UUID, 'anyarray': cast_array, 'record': cast_record} connection = None # will be set in a connection specific instance def __missing__(self, typ): """Create a cast function if it is not cached. Note that this class never raises a KeyError, but returns None when no special cast function exists. 
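        For example, looking up '_int4' builds and caches an array cast
        based on the cast for 'int4', and looking up the name of a
        composite type builds a named record cast from the casts of its
        attributes.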
""" if not isinstance(typ, str): raise TypeError('Invalid type: %s' % typ) cast = self.defaults.get(typ) if cast: # store default for faster access cast = self._add_connection(cast) self[typ] = cast elif typ.startswith('_'): base_cast = self[typ[1:]] cast = self.create_array_cast(base_cast) if base_cast: self[typ] = cast else: attnames = self.get_attnames(typ) if attnames: casts = [self[v.pgtype] for v in attnames.values()] cast = self.create_record_cast(typ, attnames, casts) self[typ] = cast return cast @staticmethod def _needs_connection(func): """Check if a typecast function needs a connection argument.""" try: args = get_args(func) except (TypeError, ValueError): return False else: return 'connection' in args[1:] def _add_connection(self, cast): """Add a connection argument to the typecast function if necessary.""" if not self.connection or not self._needs_connection(cast): return cast return partial(cast, connection=self.connection) def get(self, typ, default=None): """Get the typecast function for the given database type.""" return self[typ] or default def set(self, typ, cast): """Set a typecast function for the specified database type(s).""" if isinstance(typ, basestring): typ = [typ] if cast is None: for t in typ: self.pop(t, None) self.pop('_%s' % t, None) else: if not callable(cast): raise TypeError("Cast parameter must be callable") for t in typ: self[t] = self._add_connection(cast) self.pop('_%s' % t, None) def reset(self, typ=None): """Reset the typecasts for the specified type(s) to their defaults. When no type is specified, all typecasts will be reset. """ if typ is None: self.clear() else: if isinstance(typ, basestring): typ = [typ] for t in typ: self.pop(t, None) @classmethod def get_default(cls, typ): """Get the default typecast function for the given database type.""" return cls.defaults.get(typ) @classmethod def set_default(cls, typ, cast): """Set a default typecast function for the given database type(s).""" if isinstance(typ, basestring): typ = [typ] defaults = cls.defaults if cast is None: for t in typ: defaults.pop(t, None) defaults.pop('_%s' % t, None) else: if not callable(cast): raise TypeError("Cast parameter must be callable") for t in typ: defaults[t] = cast defaults.pop('_%s' % t, None) def get_attnames(self, typ): """Return the fields for the given record type. This method will be replaced with the get_attnames() method of DbTypes. """ return {} def dateformat(self): """Return the current date format. This method will be replaced with the dateformat() method of DbTypes. """ return '%Y-%m-%d' def create_array_cast(self, basecast): """Create an array typecast for the given base cast.""" cast_array = self['anyarray'] def cast(v): return cast_array(v, basecast) return cast def create_record_cast(self, name, fields, casts): """Create a named record typecast for the given fields and casts.""" cast_record = self['record'] record = namedtuple(name, fields) def cast(v): return record(*cast_record(v, casts)) return cast def get_typecast(typ): """Get the global typecast function for the given database type(s).""" return Typecasts.get_default(typ) def set_typecast(typ, cast): """Set a global typecast function for the given database type(s). Note that connections cache cast functions. To be sure a global change is picked up by a running connection, call db.db_types.reset_typecast(). """ Typecasts.set_default(typ, cast) class DbType(str): """Class augmenting the simple type name with additional info. 
The following additional information is provided: oid: the PostgreSQL type OID pgtype: the internal PostgreSQL data type name regtype: the registered PostgreSQL data type name simple: the more coarse-grained PyGreSQL type name typtype: b = base type, c = composite type etc. category: A = Array, b = Boolean, C = Composite etc. delim: delimiter for array types relid: corresponding table for composite types attnames: attributes for composite types """ @property def attnames(self): """Get names and types of the fields of a composite type.""" return self._get_attnames(self) class DbTypes(dict): """Cache for PostgreSQL data types. This cache maps type OIDs and names to DbType objects containing information on the associated database type. """ _num_types = frozenset('int float num money' ' int2 int4 int8 float4 float8 numeric money'.split()) def __init__(self, db): """Initialize type cache for connection.""" super(DbTypes, self).__init__() self._db = weakref.proxy(db) self._regtypes = False self._typecasts = Typecasts() self._typecasts.get_attnames = self.get_attnames self._typecasts.connection = self._db if db.server_version < 80400: # older remote databases (not officially supported) self._query_pg_type = ( "SELECT oid, typname, typname::text::regtype," " typtype, null as typcategory, typdelim, typrelid" " FROM pg_catalog.pg_type" " WHERE oid OPERATOR(pg_catalog.=) %s::regtype") else: self._query_pg_type = ( "SELECT oid, typname, typname::regtype," " typtype, typcategory, typdelim, typrelid" " FROM pg_catalog.pg_type" " WHERE oid OPERATOR(pg_catalog.=) %s::regtype") def add(self, oid, pgtype, regtype, typtype, category, delim, relid): """Create a PostgreSQL type name with additional info.""" if oid in self: return self[oid] simple = 'record' if relid else _simpletypes[pgtype] typ = DbType(regtype if self._regtypes else simple) typ.oid = oid typ.simple = simple typ.pgtype = pgtype typ.regtype = regtype typ.typtype = typtype typ.category = category typ.delim = delim typ.relid = relid typ._get_attnames = self.get_attnames return typ def __missing__(self, key): """Get the type info from the database if it is not cached.""" try: q = self._query_pg_type % (_quote_if_unqualified('$1', key),) res = self._db.query(q, (key,)).getresult() except ProgrammingError: res = None if not res: raise KeyError('Type %s could not be found' % key) res = res[0] typ = self.add(*res) self[typ.oid] = self[typ.pgtype] = typ return typ def get(self, key, default=None): """Get the type even if it is not cached.""" try: return self[key] except KeyError: return default def get_attnames(self, typ): """Get names and types of the fields of a composite type.""" if not isinstance(typ, DbType): typ = self.get(typ) if not typ: return None if not typ.relid: return None return self._db.get_attnames(typ.relid, with_oid=False) def get_typecast(self, typ): """Get the typecast function for the given database type.""" return self._typecasts.get(typ) def set_typecast(self, typ, cast): """Set a typecast function for the specified database type(s).""" self._typecasts.set(typ, cast) def reset_typecast(self, typ=None): """Reset the typecast function for the specified database type(s).""" self._typecasts.reset(typ) def typecast(self, value, typ): """Cast the given value according to the given database type.""" if value is None: # for NULL values, no typecast is necessary return None if not isinstance(typ, DbType): typ = self.get(typ) if typ: typ = typ.pgtype cast = self.get_typecast(typ) if typ else None if not cast or cast is str: # no typecast 
is necessary return value return cast(value) _re_fieldname = regex('^[A-Za-z][_a-zA-Z0-9]*$') # The result rows for database operations are returned as named tuples # by default. Since creating namedtuple classes is a somewhat expensive # operation, we cache up to 1024 of these classes by default. @lru_cache(maxsize=1024) def _row_factory(names): """Get a namedtuple factory for row results with the given names.""" try: try: return namedtuple('Row', names, rename=True)._make except TypeError: # Python 2.6 and 3.0 do not support rename names = [v if _re_fieldname.match(v) and not iskeyword(v) else 'column_%d' % (n,) for n, v in enumerate(names)] return namedtuple('Row', names)._make except ValueError: # there is still a problem with the field names names = ['column_%d' % (n,) for n in range(len(names))] return namedtuple('Row', names)._make def set_row_factory_size(maxsize): """Change the size of the namedtuple factory cache. If maxsize is set to None, the cache can grow without bound. """ global _row_factory _row_factory = lru_cache(maxsize)(_row_factory.__wrapped__) # Helper functions used by the query object def _dictiter(q): """Get query result as an iterator of dictionaries.""" fields = q.listfields() for r in q: yield dict(zip(fields, r)) def _namediter(q): """Get query result as an iterator of named tuples.""" row = _row_factory(q.listfields()) for r in q: yield row(r) def _namednext(q): """Get next row from query result as a named tuple.""" return _row_factory(q.listfields())(next(q)) def _scalariter(q): """Get query result as an iterator of scalar values.""" for r in q: yield r[0] class _MemoryQuery: """Class that embodies a given query result.""" def __init__(self, result, fields): """Create query from given result rows and field names.""" self.result = result self.fields = tuple(fields) def listfields(self): """Return the stored field names of this query.""" return self.fields def getresult(self): """Return the stored result of this query.""" return self.result def __iter__(self): return iter(self.result) def _db_error(msg, cls=DatabaseError): """Return DatabaseError with empty sqlstate attribute.""" error = cls(msg) error.sqlstate = None return error def _int_error(msg): """Return InternalError.""" return _db_error(msg, InternalError) def _prg_error(msg): """Return ProgrammingError.""" return _db_error(msg, ProgrammingError) # Initialize the C module set_decimal(Decimal) set_jsondecode(jsondecode) set_query_helpers(_dictiter, _namediter, _namednext, _scalariter) # The notification handler class NotificationHandler(object): """A PostgreSQL client-side asynchronous notification handler.""" def __init__(self, db, event, callback=None, arg_dict=None, timeout=None, stop_event=None): """Initialize the notification handler. You must pass a PyGreSQL database connection, the name of an event (notification channel) to listen for and a callback function. You can also specify a dictionary arg_dict that will be passed as the single argument to the callback function, and a timeout value in seconds (a floating point number denotes fractions of seconds). If it is absent or None, the callers will never time out. If the timeout is reached, the callback function will be called with a single argument that is None. If you set the timeout to zero, the handler will poll notifications synchronously and return. You can specify the name of the event that will be used to signal the handler to stop listening as stop_event. By default, it will be the event name prefixed with 'stop_'. 
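        A rough usage sketch (the channel name 'data_changed' is only an
        example):

            def callback(arg_dict):
                if arg_dict is None:
                    print('timed out')
                else:
                    print('notified:', arg_dict['event'], arg_dict['extra'])

            handler = NotificationHandler(db, 'data_changed', callback)
            handler()  # listens and dispatches until a stop event arrives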
""" self.db = db self.event = event self.stop_event = stop_event or 'stop_%s' % event self.listening = False self.callback = callback if arg_dict is None: arg_dict = {} self.arg_dict = arg_dict self.timeout = timeout def __del__(self): self.unlisten() def close(self): """Stop listening and close the connection.""" if self.db: self.unlisten() self.db.close() self.db = None def listen(self): """Start listening for the event and the stop event.""" if not self.listening: self.db.query('listen "%s"' % self.event) self.db.query('listen "%s"' % self.stop_event) self.listening = True def unlisten(self): """Stop listening for the event and the stop event.""" if self.listening: self.db.query('unlisten "%s"' % self.event) self.db.query('unlisten "%s"' % self.stop_event) self.listening = False def notify(self, db=None, stop=False, payload=None): """Generate a notification. Optionally, you can pass a payload with the notification. If you set the stop flag, a stop notification will be sent that will cause the handler to stop listening. Note: If the notification handler is running in another thread, you must pass a different database connection since PyGreSQL database connections are not thread-safe. """ if self.listening: if not db: db = self.db q = 'notify "%s"' % (self.stop_event if stop else self.event) if payload: q += ", '%s'" % payload return db.query(q) def __call__(self): """Invoke the notification handler. The handler is a loop that listens for notifications on the event and stop event channels. When either of these notifications are received, its associated 'pid', 'event' and 'extra' (the payload passed with the notification) are inserted into its arg_dict dictionary and the callback is invoked with this dictionary as a single argument. When the handler receives a stop event, it stops listening to both events and return. In the special case that the timeout of the handler has been set to zero, the handler will poll all events synchronously and return. If will keep listening until it receives a stop event. Note: If you run this loop in another thread, don't use the same database connection for database operations in the main thread. """ self.listen() poll = self.timeout == 0 if not poll: rlist = [self.db.fileno()] while self.listening: if poll or select.select(rlist, [], [], self.timeout)[0]: while self.listening: notice = self.db.getnotify() if not notice: # no more messages break event, pid, extra = notice if event not in (self.event, self.stop_event): self.unlisten() raise _db_error( 'Listening for "%s" and "%s", but notified of "%s"' % (self.event, self.stop_event, event)) if event == self.stop_event: self.unlisten() self.arg_dict.update(pid=pid, event=event, extra=extra) self.callback(self.arg_dict) if poll: break else: # we timed out self.unlisten() self.callback(None) def pgnotify(*args, **kw): """Same as NotificationHandler, under the traditional name.""" warnings.warn("pgnotify is deprecated, use NotificationHandler instead", DeprecationWarning, stacklevel=2) return NotificationHandler(*args, **kw) # The actual PostgreSQL database connection interface: class DB: """Wrapper class for the _pg connection type.""" db = None # invalid fallback for underlying connection def __init__(self, *args, **kw): """Create a new connection You can pass either the connection parameters or an existing _pg or pgdb connection. This allows you to use the methods of the classic pg interface with a DB-API 2 pgdb connection. 
""" if not args and len(kw) == 1: db = kw.get('db') elif not kw and len(args) == 1: db = args[0] else: db = None if db: if isinstance(db, DB): db = db.db else: try: db = db._cnx except AttributeError: pass if not db or not hasattr(db, 'db') or not hasattr(db, 'query'): db = connect(*args, **kw) self._db_args = args, kw self._closeable = True else: self._db_args = db self._closeable = False self.db = db self.dbname = db.db self._regtypes = False self._attnames = {} self._pkeys = {} self._privileges = {} self.adapter = Adapter(self) self.dbtypes = DbTypes(self) if db.server_version < 80400: # support older remote data bases (not officially supported) self._query_attnames = ( "SELECT a.attname, t.oid, t.typname, t.typname::text::regtype," " t.typtype, null as typcategory, t.typdelim, t.typrelid" " FROM pg_catalog.pg_attribute a" " JOIN pg_catalog.pg_type t" " ON t.oid OPERATOR(pg_catalog.=) a.atttypid" " WHERE a.attrelid OPERATOR(pg_catalog.=) %s::regclass AND %s" " AND NOT a.attisdropped ORDER BY a.attnum") else: self._query_attnames = ( "SELECT a.attname, t.oid, t.typname, t.typname::regtype," " t.typtype, t.typcategory, t.typdelim, t.typrelid" " FROM pg_catalog.pg_attribute a" " JOIN pg_catalog.pg_type t" " ON t.oid OPERATOR(pg_catalog.=) a.atttypid" " WHERE a.attrelid OPERATOR(pg_catalog.=) %s::regclass AND %s" " AND NOT a.attisdropped ORDER BY a.attnum") db.set_cast_hook(self.dbtypes.typecast) self.debug = None # For debugging scripts, this can be set # * to a string format specification (e.g. in CGI set to "%s
"), # * to a file object to write debug statements or # * to a callable object which takes a string argument # * to any other true value to just print debug statements def __getattr__(self, name): # All undefined members are same as in underlying connection: if self.db: return getattr(self.db, name) else: raise _int_error('Connection is not valid') def __dir__(self): # Custom dir function including the attributes of the connection: attrs = set(self.__class__.__dict__) attrs.update(self.__dict__) attrs.update(dir(self.db)) return sorted(attrs) # Context manager methods def __enter__(self): """Enter the runtime context. This will start a transaction.""" self.begin() return self def __exit__(self, et, ev, tb): """Exit the runtime context. This will end the transaction.""" if et is None and ev is None and tb is None: self.commit() else: self.rollback() def __del__(self): try: db = self.db except AttributeError: db = None if db: try: db.set_cast_hook(None) except TypeError: pass # probably already closed if self._closeable: try: db.close() except InternalError: pass # probably already closed # Auxiliary methods def _do_debug(self, *args): """Print a debug message""" if self.debug: s = '\n'.join(str(arg) for arg in args) if isinstance(self.debug, basestring): print(self.debug % s) elif hasattr(self.debug, 'write'): self.debug.write(s + '\n') elif callable(self.debug): self.debug(s) else: print(s) def _escape_qualified_name(self, s): """Escape a qualified name. Escapes the name for use as an SQL identifier, unless the name contains a dot, in which case the name is ambiguous (could be a qualified name or just a name with a dot in it) and must be quoted manually by the caller. """ if '.' not in s: s = self.escape_identifier(s) return s @staticmethod def _make_bool(d): """Get boolean value corresponding to d.""" return bool(d) if get_bool() else ('t' if d else 'f') def _list_params(self, params): """Create a human readable parameter list.""" return ', '.join('$%d=%r' % (n, v) for n, v in enumerate(params, 1)) # Public methods # escape_string and escape_bytea exist as methods, # so we define unescape_bytea as a method as well unescape_bytea = staticmethod(unescape_bytea) def decode_json(self, s): """Decode a JSON string coming from the database.""" return (get_jsondecode() or jsondecode)(s) def encode_json(self, d): """Encode a JSON string for use within SQL.""" return jsonencode(d) def close(self): """Close the database connection.""" # Wraps shared library function so we can track state. db = self.db if db: try: db.set_cast_hook(None) except TypeError: pass # probably already closed if self._closeable: db.close() self.db = None else: raise _int_error('Connection already closed') def reset(self): """Reset connection with current parameters. All derived queries and large objects derived from this connection will not be usable after this call. """ if self.db: self.db.reset() else: raise _int_error('Connection already closed') def reopen(self): """Reopen connection to the database. Used in case we need another connection to the same database. Note that we can still reopen a database that we have closed. """ # There is no such shared library function. 
if self._closeable: db = connect(*self._db_args[0], **self._db_args[1]) if self.db: self.db.set_cast_hook(None) self.db.close() db.set_cast_hook(self.dbtypes.typecast) self.db = db else: self.db = self._db_args def begin(self, mode=None): """Begin a transaction.""" qstr = 'BEGIN' if mode: qstr += ' ' + mode return self.query(qstr) start = begin def commit(self): """Commit the current transaction.""" return self.query('COMMIT') end = commit def rollback(self, name=None): """Roll back the current transaction.""" qstr = 'ROLLBACK' if name: qstr += ' TO ' + name return self.query(qstr) abort = rollback def savepoint(self, name): """Define a new savepoint within the current transaction.""" return self.query('SAVEPOINT ' + name) def release(self, name): """Destroy a previously defined savepoint.""" return self.query('RELEASE ' + name) def get_parameter(self, parameter): """Get the value of a run-time parameter. If the parameter is a string, the return value will also be a string that is the current setting of the run-time parameter with that name. You can get several parameters at once by passing a list, set or dict. When passing a list of parameter names, the return value will be a corresponding list of parameter settings. When passing a set of parameter names, a new dict will be returned, mapping these parameter names to their settings. Finally, if you pass a dict as parameter, its values will be set to the current parameter settings corresponding to its keys. By passing the special name 'all' as the parameter, you can get a dict of all existing configuration parameters. """ if isinstance(parameter, basestring): parameter = [parameter] values = None elif isinstance(parameter, (list, tuple)): values = [] elif isinstance(parameter, (set, frozenset)): values = {} elif isinstance(parameter, dict): values = parameter else: raise TypeError( 'The parameter must be a string, list, set or dict') if not parameter: raise TypeError('No parameter has been specified') params = {} if isinstance(values, dict) else [] for key in parameter: param = key.strip().lower() if isinstance( key, basestring) else None if not param: raise TypeError('Invalid parameter') if param == 'all': q = 'SHOW ALL' values = self.db.query(q).getresult() values = dict(value[:2] for value in values) break if isinstance(values, dict): params[param] = key else: params.append(param) else: for param in params: q = 'SHOW %s' % (param,) value = self.db.query(q).getresult()[0][0] if values is None: values = value elif isinstance(values, list): values.append(value) else: values[params[param]] = value return values def set_parameter(self, parameter, value=None, local=False): """Set the value of a run-time parameter. If the parameter and the value are strings, the run-time parameter will be set to that value. If no value or None is passed as a value, then the run-time parameter will be restored to its default value. You can set several parameters at once by passing a list of parameter names, together with a single value that all parameters should be set to or with a corresponding list of values. You can also pass the parameters as a set if you only provide a single value. Finally, you can pass a dict with parameter names as keys. In this case, you should not pass a value, since the values for the parameters will be taken from the dict. By passing the special name 'all' as the parameter, you can reset all existing settable run-time parameters to their default values. 
If you set local to True, then the command takes effect for only the current transaction. After commit() or rollback(), the session-level setting takes effect again. Setting local to True will appear to have no effect if it is executed outside a transaction, since the transaction will end immediately. """ if isinstance(parameter, basestring): parameter = {parameter: value} elif isinstance(parameter, (list, tuple)): if isinstance(value, (list, tuple)): parameter = dict(zip(parameter, value)) else: parameter = dict.fromkeys(parameter, value) elif isinstance(parameter, (set, frozenset)): if isinstance(value, (list, tuple, set, frozenset)): value = set(value) if len(value) == 1: value = value.pop() if not(value is None or isinstance(value, basestring)): raise ValueError('A single value must be specified' ' when parameter is a set') parameter = dict.fromkeys(parameter, value) elif isinstance(parameter, dict): if value is not None: raise ValueError('A value must not be specified' ' when parameter is a dictionary') else: raise TypeError( 'The parameter must be a string, list, set or dict') if not parameter: raise TypeError('No parameter has been specified') params = {} for key, value in parameter.items(): param = key.strip().lower() if isinstance( key, basestring) else None if not param: raise TypeError('Invalid parameter') if param == 'all': if value is not None: raise ValueError('A value must not be specified' " when parameter is 'all'") params = {'all': None} break params[param] = value local = ' LOCAL' if local else '' for param, value in params.items(): if value is None: q = 'RESET%s %s' % (local, param) else: q = 'SET%s %s TO %s' % (local, param, value) self._do_debug(q) self.db.query(q) def query(self, command, *args): """Execute a SQL command string. This method simply sends a SQL query to the database. If the query is an insert statement that inserted exactly one row into a table that has OIDs, the return value is the OID of the newly inserted row. If the query is an update or delete statement, or an insert statement that did not insert exactly one row in a table with OIDs, then the number of rows affected is returned as a string. If it is a statement that returns rows as a result (usually a select statement, but maybe also an "insert/update ... returning" statement), this method returns a Query object that can be accessed via getresult() or dictresult() or simply printed. Otherwise, it returns `None`. The query can contain numbered parameters of the form $1 in place of any data constant. Arguments given after the query string will be substituted for the corresponding numbered parameter. Parameter values can also be given as a single list or tuple argument. """ # Wraps shared library function for debugging. if not self.db: raise _int_error('Connection is not valid') if args: self._do_debug(command, args) return self.db.query(command, args) self._do_debug(command) return self.db.query(command) def query_formatted(self, command, parameters=None, types=None, inline=False): """Execute a formatted SQL command string. Similar to query, but using Python format placeholders of the form %s or %(names)s instead of PostgreSQL placeholders of the form $1. The parameters must be passed as a tuple, list or dict. You can also pass a corresponding tuple, list or dict of database types in order to format the parameters properly in case there is ambiguity. If you set inline to True, the parameters will be sent to the database embedded in the SQL command, otherwise they will be sent separately.
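Example (a sketch; the "fruits" table and its columns are made up for illustration):

    db.query_formatted(
        'INSERT INTO fruits (name, price) VALUES (%s, %s)',
        ('apple', 1.95))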
""" return self.query(*self.adapter.format_query( command, parameters, types, inline)) def query_prepared(self, name, *args): """Execute a prepared SQL statement. This works like the query() method, except that instead of passing the SQL command, you pass the name of a prepared statement. If you pass an empty name, the unnamed statement will be executed. """ if not self.db: raise _int_error('Connection is not valid') if name is None: name = '' if args: self._do_debug('EXECUTE', name, args) return self.db.query_prepared(name, args) self._do_debug('EXECUTE', name) return self.db.query_prepared(name) def prepare(self, name, command): """Create a prepared SQL statement. This creates a prepared statement for the given command with the given name for later execution with the query_prepared() method. The name can be empty to create an unnamed statement, in which case any pre-existing unnamed statement is automatically replaced; otherwise it is an error if the statement name is already defined in the current database session. We recommend always using named queries, since unnamed queries have a limited lifetime and can be automatically replaced or destroyed by various operations. """ if not self.db: raise _int_error('Connection is not valid') if name is None: name = '' self._do_debug('prepare', name, command) return self.db.prepare(name, command) def describe_prepared(self, name=None): """Describe a prepared SQL statement. This method returns a Query object describing the result columns of the prepared statement with the given name. If you omit the name, the unnamed statement will be described if you created one before. """ if name is None: name = '' return self.db.describe_prepared(name) def delete_prepared(self, name=None): """Delete a prepared SQL statement This deallocates a previously prepared SQL statement with the given name, or deallocates all prepared statements if you do not specify a name. Note that prepared statements are also deallocated automatically when the current session ends. """ q = "DEALLOCATE %s" % (name or 'ALL',) self._do_debug(q) return self.db.query(q) def pkey(self, table, composite=False, flush=False): """Get or set the primary key of a table. Single primary keys are returned as strings unless you set the composite flag. Composite primary keys are always represented as tuples. Note that this raises a KeyError if the table does not have a primary key. If flush is set then the internal cache for primary keys will be flushed. This may be necessary after the database schema or the search path has been changed. 
""" pkeys = self._pkeys if flush: pkeys.clear() self._do_debug('The pkey cache has been flushed') try: # cache lookup pkey = pkeys[table] except KeyError: # cache miss, check the database q = ("SELECT a.attname, a.attnum, i.indkey" " FROM pg_catalog.pg_index i" " JOIN pg_catalog.pg_attribute a" " ON a.attrelid OPERATOR(pg_catalog.=) i.indrelid" " AND a.attnum OPERATOR(pg_catalog.=) ANY(i.indkey)" " AND NOT a.attisdropped" " WHERE i.indrelid OPERATOR(pg_catalog.=) %s::regclass" " AND i.indisprimary ORDER BY a.attnum") % ( _quote_if_unqualified('$1', table),) pkey = self.db.query(q, (table,)).getresult() if not pkey: raise KeyError('Table %s has no primary key' % table) # we want to use the order defined in the primary key index here, # not the order as defined by the columns in the table if len(pkey) > 1: indkey = pkey[0][2] pkey = sorted(pkey, key=lambda row: indkey.index(row[1])) pkey = tuple(row[0] for row in pkey) else: pkey = pkey[0][0] pkeys[table] = pkey # cache it if composite and not isinstance(pkey, tuple): pkey = (pkey,) return pkey def get_databases(self): """Get list of databases in the system.""" return [s[0] for s in self.db.query( 'SELECT datname FROM pg_catalog.pg_database').getresult()] def get_relations(self, kinds=None, system=False): """Get list of relations in connected database of specified kinds. If kinds is None or empty, all kinds of relations are returned. Otherwise kinds can be a string or sequence of type letters specifying which kind of relations you want to list. Set the system flag if you want to get the system relations as well. """ where = [] if kinds: where.append("r.relkind IN (%s)" % ','.join("'%s'" % k for k in kinds)) if not system: where.append("s.nspname NOT SIMILAR" " TO 'pg/_%|information/_schema' ESCAPE '/'") where = " WHERE %s" % ' AND '.join(where) if where else '' q = ("SELECT pg_catalog.quote_ident(s.nspname) OPERATOR(pg_catalog.||)" " '.' OPERATOR(pg_catalog.||) pg_catalog.quote_ident(r.relname)" " FROM pg_catalog.pg_class r" " JOIN pg_catalog.pg_namespace s" " ON s.oid OPERATOR(pg_catalog.=) r.relnamespace%s" " ORDER BY s.nspname, r.relname") % where return [r[0] for r in self.db.query(q).getresult()] def get_tables(self, system=False): """Return list of tables in connected database. Set the system flag if you want to get the system tables as well. """ return self.get_relations('r', system) def get_attnames(self, table, with_oid=True, flush=False): """Given the name of a table, dig out the set of attribute names. Returns a read-only dictionary of attribute names (the names are the keys, the values are the names of the attributes' types) with the column names in the proper order if you iterate over it. If flush is set, then the internal cache for attribute names will be flushed. This may be necessary after the database schema or the search path has been changed. By default, only a limited number of simple types will be returned. You can get the registered types after calling use_regtypes(True). 
""" attnames = self._attnames if flush: attnames.clear() self._do_debug('The attnames cache has been flushed') try: # cache lookup names = attnames[table] except KeyError: # cache miss, check the database q = "a.attnum OPERATOR(pg_catalog.>) 0" if with_oid: q = "(%s OR a.attname OPERATOR(pg_catalog.=) 'oid')" % q q = self._query_attnames % (_quote_if_unqualified('$1', table), q) names = self.db.query(q, (table,)).getresult() types = self.dbtypes names = ((name[0], types.add(*name[1:])) for name in names) names = AttrDict(names) attnames[table] = names # cache it return names def use_regtypes(self, regtypes=None): """Use registered type names instead of simplified type names.""" if regtypes is None: return self.dbtypes._regtypes else: regtypes = bool(regtypes) if regtypes != self.dbtypes._regtypes: self.dbtypes._regtypes = regtypes self._attnames.clear() self.dbtypes.clear() return regtypes def has_table_privilege(self, table, privilege='select', flush=False): """Check whether current user has specified table privilege. If flush is set, then the internal cache for table privileges will be flushed. This may be necessary after privileges have been changed. """ privileges = self._privileges if flush: privileges.clear() self._do_debug('The privileges cache has been flushed') privilege = privilege.lower() try: # ask cache ret = privileges[table, privilege] except KeyError: # cache miss, ask the database q = "SELECT pg_catalog.has_table_privilege(%s, $2)" % ( _quote_if_unqualified('$1', table),) q = self.db.query(q, (table, privilege)) ret = q.getresult()[0][0] == self._make_bool(True) privileges[table, privilege] = ret # cache it return ret def get(self, table, row, keyname=None): """Get a row from a database table or view. This method is the basic mechanism to get a single row. It assumes that the keyname specifies a unique row. It must be the name of a single column or a tuple of column names. If the keyname is not specified, then the primary key for the table is used. If row is a dictionary, then the value for the key is taken from it. Otherwise, the row must be a single value or a tuple of values corresponding to the passed keyname or primary key. The fetched row from the table will be returned as a new dictionary or used to replace the existing values when row was passed as a dictionary. The OID is also put into the dictionary if the table has one, but in order to allow the caller to work with multiple tables, it is munged as "oid(table)" using the actual name of the table. 
""" if table.endswith('*'): # hint for descendant tables can be ignored table = table[:-1].rstrip() attnames = self.get_attnames(table) qoid = _oid_key(table) if 'oid' in attnames else None if keyname and isinstance(keyname, basestring): keyname = (keyname,) if qoid and isinstance(row, dict) and qoid in row and 'oid' not in row: row['oid'] = row[qoid] if not keyname: try: # if keyname is not specified, try using the primary key keyname = self.pkey(table, True) except KeyError: # the table has no primary key # try using the oid instead if qoid and isinstance(row, dict) and 'oid' in row: keyname = ('oid',) else: raise _prg_error('Table %s has no primary key' % table) else: # the table has a primary key # check whether all key columns have values if isinstance(row, dict) and not set(keyname).issubset(row): # try using the oid instead if qoid and 'oid' in row: keyname = ('oid',) else: raise KeyError( 'Missing value in row for specified keyname') if not isinstance(row, dict): if not isinstance(row, (tuple, list)): row = [row] if len(keyname) != len(row): raise KeyError( 'Differing number of items in keyname and row') row = dict(zip(keyname, row)) params = self.adapter.parameter_list() adapt = params.add col = self.escape_identifier what = 'oid, *' if qoid else '*' where = ' AND '.join('%s OPERATOR(pg_catalog.=) %s' % ( col(k), adapt(row[k], attnames[k])) for k in keyname) if 'oid' in row: if qoid: row[qoid] = row['oid'] del row['oid'] q = 'SELECT %s FROM %s WHERE %s LIMIT 1' % ( what, self._escape_qualified_name(table), where) self._do_debug(q, params) q = self.db.query(q, params) res = q.dictresult() if not res: # make where clause in error message better readable where = where.replace('OPERATOR(pg_catalog.=)', '=') raise _db_error('No such record in %s\nwhere %s\nwith %s' % ( table, where, self._list_params(params))) for n, value in res[0].items(): if qoid and n == 'oid': n = qoid row[n] = value return row def insert(self, table, row=None, **kw): """Insert a row into a database table. This method inserts a row into a table. The name of the table must be passed as the first parameter. The other parameters are used for providing the data of the row that shall be inserted into the table. If a dictionary is supplied as the second parameter, it starts with that. Otherwise it uses a blank dictionary. Either way the dictionary is updated from the keywords. The dictionary is then reloaded with the values actually inserted in order to pick up values modified by rules, triggers, etc. 
""" if table.endswith('*'): # hint for descendant tables can be ignored table = table[:-1].rstrip() if row is None: row = {} row.update(kw) if 'oid' in row: del row['oid'] # do not insert oid attnames = self.get_attnames(table) qoid = _oid_key(table) if 'oid' in attnames else None params = self.adapter.parameter_list() adapt = params.add col = self.escape_identifier names, values = [], [] for n in attnames: if n in row: names.append(col(n)) values.append(adapt(row[n], attnames[n])) if not names: raise _prg_error('No column found that can be inserted') names, values = ', '.join(names), ', '.join(values) ret = 'oid, *' if qoid else '*' q = 'INSERT INTO %s (%s) VALUES (%s) RETURNING %s' % ( self._escape_qualified_name(table), names, values, ret) self._do_debug(q, params) q = self.db.query(q, params) res = q.dictresult() if res: # this should always be true for n, value in res[0].items(): if qoid and n == 'oid': n = qoid row[n] = value return row def update(self, table, row=None, **kw): """Update an existing row in a database table. Similar to insert, but updates an existing row. The update is based on the primary key of the table or the OID value as munged by get() or passed as keyword. The OID will take precedence if provided, so that it is possible to update the primary key itself. The dictionary is then modified to reflect any changes caused by the update due to triggers, rules, default values, etc. """ if table.endswith('*'): table = table[:-1].rstrip() # need parent table name attnames = self.get_attnames(table) qoid = _oid_key(table) if 'oid' in attnames else None if row is None: row = {} elif 'oid' in row: del row['oid'] # only accept oid key from named args for safety row.update(kw) if qoid and qoid in row and 'oid' not in row: row['oid'] = row[qoid] if qoid and 'oid' in row: # try using the oid keyname = ('oid',) else: # try using the primary key try: keyname = self.pkey(table, True) except KeyError: # the table has no primary key raise _prg_error('Table %s has no primary key' % table) # check whether all key columns have values if not set(keyname).issubset(row): raise KeyError('Missing value for primary key in row') params = self.adapter.parameter_list() adapt = params.add col = self.escape_identifier where = ' AND '.join('%s OPERATOR(pg_catalog.=) %s' % ( col(k), adapt(row[k], attnames[k])) for k in keyname) if 'oid' in row: if qoid: row[qoid] = row['oid'] del row['oid'] values = [] keyname = set(keyname) for n in attnames: if n in row and n not in keyname: values.append('%s = %s' % (col(n), adapt(row[n], attnames[n]))) if not values: return row values = ', '.join(values) ret = 'oid, *' if qoid else '*' q = 'UPDATE %s SET %s WHERE %s RETURNING %s' % ( self._escape_qualified_name(table), values, where, ret) self._do_debug(q, params) q = self.db.query(q, params) res = q.dictresult() if res: # may be empty when row does not exist for n, value in res[0].items(): if qoid and n == 'oid': n = qoid row[n] = value return row def upsert(self, table, row=None, **kw): """Insert a row into a database table with conflict resolution This method inserts a row into a table, but instead of raising a ProgrammingError exception in case a row with the same primary key already exists, an update will be executed instead. This will be performed as a single atomic operation on the database, so race conditions can be avoided. Like the insert method, the first parameter is the name of the table and the second parameter can be used to pass the values to be inserted as a dictionary. 
Unlike the insert and update methods, keyword parameters are not used to modify the dictionary, but to specify which columns shall be updated in case of a conflict, and in which way: A value of False or None means the column shall not be updated, a value of True means the column shall be updated with the value that has been proposed for insertion, i.e. has been passed as value in the dictionary. Columns that are not specified by keywords but appear as keys in the dictionary are also updated like in the case keywords had been passed with the value True. So if in the case of a conflict you want to update every column that has been passed in the dictionary row, you would call upsert(table, row). If you don't want to do anything in case of a conflict, i.e. leave the existing row as it is, call upsert(table, row, **dict.fromkeys(row)). If you need more fine-grained control of what gets updated, you can also pass strings in the keyword parameters. These strings will be used as SQL expressions for the update columns. In these expressions you can refer to the value that already exists in the table by prefixing the column name with "included.", and to the value that has been proposed for insertion by prefixing the column name with "excluded." The dictionary is modified in any case to reflect the values in the database after the operation has completed. Note: The method uses the PostgreSQL "upsert" feature which is only available since PostgreSQL 9.5. """ if table.endswith('*'): # hint for descendant tables can be ignored table = table[:-1].rstrip() if row is None: row = {} if 'oid' in row: del row['oid'] # do not insert oid if 'oid' in kw: del kw['oid'] # do not update oid attnames = self.get_attnames(table) qoid = _oid_key(table) if 'oid' in attnames else None params = self.adapter.parameter_list() adapt = params.add col = self.escape_identifier names, values = [], [] for n in attnames: if n in row: names.append(col(n)) values.append(adapt(row[n], attnames[n])) names, values = ', '.join(names), ', '.join(values) try: keyname = self.pkey(table, True) except KeyError: raise _prg_error('Table %s has no primary key' % table) target = ', '.join(col(k) for k in keyname) update = [] keyname = set(keyname) keyname.add('oid') for n in attnames: if n not in keyname: value = kw.get(n, True) if value: if not isinstance(value, basestring): value = 'excluded.%s' % col(n) update.append('%s = %s' % (col(n), value)) if not values: return row do = 'update set %s' % ', '.join(update) if update else 'nothing' ret = 'oid, *' if qoid else '*' q = ('INSERT INTO %s AS included (%s) VALUES (%s)' ' ON CONFLICT (%s) DO %s RETURNING %s') % ( self._escape_qualified_name(table), names, values, target, do, ret) self._do_debug(q, params) try: q = self.db.query(q, params) except ProgrammingError: if self.server_version < 90500: raise _prg_error( 'Upsert operation is not supported by PostgreSQL version') raise # re-raise original error res = q.dictresult() if res: # may be empty with "do nothing" for n, value in res[0].items(): if qoid and n == 'oid': n = qoid row[n] = value else: self.get(table, row) return row def clear(self, table, row=None): """Clear all the attributes to values determined by the types. Numeric types are set to 0, Booleans are set to false, and everything else is set to the empty string. If the row argument is present, it is used as the row dictionary and any entries matching attribute names are cleared with everything else left unchanged.
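Example (a sketch; illustrative table as above):

    db.clear('fruits')  # -> {'id': 0, 'name': '', 'price': 0}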
""" # At some point we will need a way to get defaults from a table. if row is None: row = {} # empty if argument is not present attnames = self.get_attnames(table) for n, t in attnames.items(): if n == 'oid': continue t = t.simple if t in DbTypes._num_types: row[n] = 0 elif t == 'bool': row[n] = self._make_bool(False) else: row[n] = '' return row def delete(self, table, row=None, **kw): """Delete an existing row in a database table. This method deletes the row from a table. It deletes based on the primary key of the table or the OID value as munged by get() or passed as keyword. The OID will take precedence if provided. The return value is the number of deleted rows (i.e. 0 if the row did not exist and 1 if the row was deleted). Note that if the row cannot be deleted because e.g. it is still referenced by another table, this method raises a ProgrammingError. """ if table.endswith('*'): # hint for descendant tables can be ignored table = table[:-1].rstrip() attnames = self.get_attnames(table) qoid = _oid_key(table) if 'oid' in attnames else None if row is None: row = {} elif 'oid' in row: del row['oid'] # only accept oid key from named args for safety row.update(kw) if qoid and qoid in row and 'oid' not in row: row['oid'] = row[qoid] if qoid and 'oid' in row: # try using the oid keyname = ('oid',) else: # try using the primary key try: keyname = self.pkey(table, True) except KeyError: # the table has no primary key raise _prg_error('Table %s has no primary key' % table) # check whether all key columns have values if not set(keyname).issubset(row): raise KeyError('Missing value for primary key in row') params = self.adapter.parameter_list() adapt = params.add col = self.escape_identifier where = ' AND '.join('%s OPERATOR(pg_catalog.=) %s' % ( col(k), adapt(row[k], attnames[k])) for k in keyname) if 'oid' in row: if qoid: row[qoid] = row['oid'] del row['oid'] q = 'DELETE FROM %s WHERE %s' % ( self._escape_qualified_name(table), where) self._do_debug(q, params) res = self.db.query(q, params) return int(res) def truncate(self, table, restart=False, cascade=False, only=False): """Empty a table or set of tables. This method quickly removes all rows from the given table or set of tables. It has the same effect as an unqualified DELETE on each table, but since it does not actually scan the tables it is faster. Furthermore, it reclaims disk space immediately, rather than requiring a subsequent VACUUM operation. This is most useful on large tables. If restart is set to True, sequences owned by columns of the truncated table(s) are automatically restarted. If cascade is set to True, it also truncates all tables that have foreign-key references to any of the named tables. If the parameter only is not set to True, all the descendant tables (if any) will also be truncated. Optionally, a '*' can be specified after the table name to explicitly indicate that descendant tables are included. 
""" if isinstance(table, basestring): only = {table: only} table = [table] elif isinstance(table, (list, tuple)): if isinstance(only, (list, tuple)): only = dict(zip(table, only)) else: only = dict.fromkeys(table, only) elif isinstance(table, (set, frozenset)): only = dict.fromkeys(table, only) else: raise TypeError('The table must be a string, list or set') if not (restart is None or isinstance(restart, (bool, int))): raise TypeError('Invalid type for the restart option') if not (cascade is None or isinstance(cascade, (bool, int))): raise TypeError('Invalid type for the cascade option') tables = [] for t in table: u = only.get(t) if not (u is None or isinstance(u, (bool, int))): raise TypeError('Invalid type for the only option') if t.endswith('*'): if u: raise ValueError( 'Contradictory table name and only options') t = t[:-1].rstrip() t = self._escape_qualified_name(t) if u: t = 'ONLY %s' % t tables.append(t) q = ['TRUNCATE', ', '.join(tables)] if restart: q.append('RESTART IDENTITY') if cascade: q.append('CASCADE') q = ' '.join(q) self._do_debug(q) return self.db.query(q) def get_as_list(self, table, what=None, where=None, order=None, limit=None, offset=None, scalar=False): """Get a table as a list. This gets a convenient representation of the table as a list of named tuples in Python. You only need to pass the name of the table (or any other SQL expression returning rows). Note that by default this will return the full content of the table which can be huge and overflow your memory. However, you can control the amount of data returned using the other optional parameters. The parameter 'what' can restrict the query to only return a subset of the table columns. It can be a string, list or a tuple. The parameter 'where' can restrict the query to only return a subset of the table rows. It can be a string, list or a tuple of SQL expressions that all need to be fulfilled. The parameter 'order' specifies the ordering of the rows. It can also be a other string, list or a tuple. If no ordering is specified, the result will be ordered by the primary key(s) or all columns if no primary key exists. You can set 'order' to False if you don't care about the ordering. The parameters 'limit' and 'offset' can be integers specifying the maximum number of rows returned and a number of rows skipped over. If you set the 'scalar' option to True, then instead of the named tuples you will get the first items of these tuples. This is useful if the result has only one column anyway. """ if not table: raise TypeError('The table name is missing') if what: if isinstance(what, (list, tuple)): what = ', '.join(map(str, what)) if order is None: order = what else: what = '*' q = ['SELECT', what, 'FROM', table] if where: if isinstance(where, (list, tuple)): where = ' AND '.join(map(str, where)) q.extend(['WHERE', where]) if order is None: try: order = self.pkey(table, True) except (KeyError, ProgrammingError): try: order = list(self.get_attnames(table)) except (KeyError, ProgrammingError): pass if order: if isinstance(order, (list, tuple)): order = ', '.join(map(str, order)) q.extend(['ORDER BY', order]) if limit: q.append('LIMIT %d' % limit) if offset: q.append('OFFSET %d' % offset) q = ' '.join(q) self._do_debug(q) q = self.db.query(q) res = q.namedresult() if res and scalar: res = [row[0] for row in res] return res def get_as_dict(self, table, keyname=None, what=None, where=None, order=None, limit=None, offset=None, scalar=False): """Get a table as a dictionary. 
This method is similar to get_as_list(), but returns the table as a Python dict instead of a Python list, which can be even more convenient. The primary key column(s) of the table will be used as the keys of the dictionary, while the other column(s) will be the corresponding values. The keys will be named tuples if the table has a composite primary key. The rows will also be named tuples unless the 'scalar' option has been set to True. With the optional parameter 'keyname' you can specify an alternative set of columns to be used as the keys of the dictionary. It must be set as a string, list or a tuple. If the Python version supports it, the dictionary will be an OrderedDict using the order specified with the 'order' parameter or the key column(s) if not specified. You can set 'order' to False if you don't care about the ordering. In this case the returned dictionary will be an ordinary one. """ if not table: raise TypeError('The table name is missing') if not keyname: try: keyname = self.pkey(table, True) except (KeyError, ProgrammingError): raise _prg_error('Table %s has no primary key' % table) if isinstance(keyname, basestring): keyname = [keyname] elif not isinstance(keyname, (list, tuple)): raise KeyError('The keyname must be a string, list or tuple') if what: if isinstance(what, (list, tuple)): what = ', '.join(map(str, what)) if order is None: order = what else: what = '*' q = ['SELECT', what, 'FROM', table] if where: if isinstance(where, (list, tuple)): where = ' AND '.join(map(str, where)) q.extend(['WHERE', where]) if order is None: order = keyname if order: if isinstance(order, (list, tuple)): order = ', '.join(map(str, order)) q.extend(['ORDER BY', order]) if limit: q.append('LIMIT %d' % limit) if offset: q.append('OFFSET %d' % offset) q = ' '.join(q) self._do_debug(q) q = self.db.query(q) res = q.getresult() cls = OrderedDict if order else dict if not res: return cls() keyset = set(keyname) fields = q.listfields() if not keyset.issubset(fields): raise KeyError('Missing keyname in row') keyind, rowind = [], [] for i, f in enumerate(fields): (keyind if f in keyset else rowind).append(i) keytuple = len(keyind) > 1 getkey = itemgetter(*keyind) keys = map(getkey, res) if scalar: rowind = rowind[:1] rowtuple = False else: rowtuple = len(rowind) > 1 if scalar or rowtuple: getrow = itemgetter(*rowind) else: rowind = rowind[0] getrow = lambda row: (row[rowind],) rowtuple = True rows = map(getrow, res) if keytuple or rowtuple: if keytuple: keys = _namediter(_MemoryQuery(keys, keyname)) if rowtuple: fields = [f for f in fields if f not in keyset] rows = _namediter(_MemoryQuery(rows, fields)) return cls(zip(keys, rows)) def notification_handler(self, event, callback, arg_dict=None, timeout=None, stop_event=None): """Get notification handler that will run the given callback.""" return NotificationHandler(self, event, callback, arg_dict, timeout, stop_event) # if run as script, print some information if __name__ == '__main__': print('PyGreSQL version ' + version) print('') print(__doc__) pygresql-5.1.2/pgconn.c000066400000000000000000001305761365010227600150300ustar00rootroot00000000000000/* * PyGreSQL - a Python interface for the PostgreSQL database. * * The connection object - this file is part of the C extension module. * * Copyright (c) 2020 by the PyGreSQL Development Team * * Please see the LICENSE.TXT file for specific restrictions. */ /* Deallocate connection object.
*/ static void conn_dealloc(connObject *self) { if (self->cnx) { Py_BEGIN_ALLOW_THREADS PQfinish(self->cnx); Py_END_ALLOW_THREADS } Py_XDECREF(self->cast_hook); Py_XDECREF(self->notice_receiver); PyObject_Del(self); } /* Get connection attributes. */ static PyObject * conn_getattr(connObject *self, PyObject *nameobj) { const char *name = PyStr_AsString(nameobj); /* * Although we could check individually, there are only a few * attributes that don't require a live connection and unless someone * has an urgent need, this will have to do. */ /* first exception - close which returns a different error */ if (strcmp(name, "close") && !self->cnx) { PyErr_SetString(PyExc_TypeError, "Connection is not valid"); return NULL; } /* list PostgreSQL connection fields */ /* postmaster host */ if (!strcmp(name, "host")) { char *r = PQhost(self->cnx); if (!r || r[0] == '/') /* Pg >= 9.6 can return a Unix socket path */ r = "localhost"; return PyStr_FromString(r); } /* postmaster port */ if (!strcmp(name, "port")) return PyInt_FromLong(atol(PQport(self->cnx))); /* selected database */ if (!strcmp(name, "db")) return PyStr_FromString(PQdb(self->cnx)); /* selected options */ if (!strcmp(name, "options")) return PyStr_FromString(PQoptions(self->cnx)); /* error (status) message */ if (!strcmp(name, "error")) return PyStr_FromString(PQerrorMessage(self->cnx)); /* connection status : 1 - OK, 0 - BAD */ if (!strcmp(name, "status")) return PyInt_FromLong(PQstatus(self->cnx) == CONNECTION_OK ? 1 : 0); /* provided user name */ if (!strcmp(name, "user")) return PyStr_FromString(PQuser(self->cnx)); /* protocol version */ if (!strcmp(name, "protocol_version")) return PyInt_FromLong(PQprotocolVersion(self->cnx)); /* backend version */ if (!strcmp(name, "server_version")) return PyInt_FromLong(PQserverVersion(self->cnx)); /* descriptor number of connection socket */ if (!strcmp(name, "socket")) { return PyInt_FromLong(PQsocket(self->cnx)); } /* PID of backend process */ if (!strcmp(name, "backend_pid")) { return PyInt_FromLong(PQbackendPID(self->cnx)); } /* whether the connection uses SSL */ if (!strcmp(name, "ssl_in_use")) { #ifdef SSL_INFO if (PQsslInUse(self->cnx)) { Py_INCREF(Py_True); return Py_True; } else { Py_INCREF(Py_False); return Py_False; } #else set_error_msg(NotSupportedError, "SSL info functions not supported"); return NULL; #endif } /* SSL attributes */ if (!strcmp(name, "ssl_attributes")) { #ifdef SSL_INFO return get_ssl_attributes(self->cnx); #else set_error_msg(NotSupportedError, "SSL info functions not supported"); return NULL; #endif } return PyObject_GenericGetAttr((PyObject *) self, nameobj); } /* Check connection validity. */ static int _check_cnx_obj(connObject *self) { if (!self || !self->valid || !self->cnx) { set_error_msg(OperationalError, "Connection has been closed"); return 0; } return 1; } /* Create source object. 
*/ static char conn_source__doc__[] = "source() -- create a new source object for this connection"; static PyObject * conn_source(connObject *self, PyObject *noargs) { sourceObject *source_obj; /* checks validity */ if (!_check_cnx_obj(self)) { return NULL; } /* allocates new query object */ if (!(source_obj = PyObject_New(sourceObject, &sourceType))) { return NULL; } /* initializes internal parameters */ Py_XINCREF(self); source_obj->pgcnx = self; source_obj->result = NULL; source_obj->valid = 1; source_obj->arraysize = PG_ARRAYSIZE; return (PyObject *) source_obj; } /* Base method for execution of both unprepared and prepared queries */ static PyObject * _conn_query(connObject *self, PyObject *args, int prepared) { PyObject *query_str_obj, *param_obj = NULL; PGresult* result; queryObject* query_obj; char *query; int encoding, status, nparms = 0; if (!self->cnx) { PyErr_SetString(PyExc_TypeError, "Connection is not valid"); return NULL; } /* get query args */ if (!PyArg_ParseTuple(args, "O|O", &query_str_obj, ¶m_obj)) { return NULL; } encoding = PQclientEncoding(self->cnx); if (PyBytes_Check(query_str_obj)) { query = PyBytes_AsString(query_str_obj); query_str_obj = NULL; } else if (PyUnicode_Check(query_str_obj)) { query_str_obj = get_encoded_string(query_str_obj, encoding); if (!query_str_obj) return NULL; /* pass the UnicodeEncodeError */ query = PyBytes_AsString(query_str_obj); } else { PyErr_SetString(PyExc_TypeError, "Method query() expects a string as first argument"); return NULL; } /* If param_obj is passed, ensure it's a non-empty tuple. We want to treat * an empty tuple the same as no argument since we'll get that when the * caller passes no arguments to db.query(), and historic behaviour was * to call PQexec() in that case, which can execute multiple commands. */ if (param_obj) { param_obj = PySequence_Fast( param_obj, "Method query() expects a sequence as second argument"); if (!param_obj) { Py_XDECREF(query_str_obj); return NULL; } nparms = (int) PySequence_Fast_GET_SIZE(param_obj); /* if there's a single argument and it's a list or tuple, it * contains the positional arguments. 
*/ if (nparms == 1) { PyObject *first_obj = PySequence_Fast_GET_ITEM(param_obj, 0); if (PyList_Check(first_obj) || PyTuple_Check(first_obj)) { Py_DECREF(param_obj); param_obj = PySequence_Fast(first_obj, NULL); nparms = (int) PySequence_Fast_GET_SIZE(param_obj); } } } /* gets result */ if (nparms) { /* prepare arguments */ PyObject **str, **s; const char **parms, **p; register int i; str = (PyObject **) PyMem_Malloc((size_t) nparms * sizeof(*str)); parms = (const char **) PyMem_Malloc((size_t) nparms * sizeof(*parms)); if (!str || !parms) { PyMem_Free((void *) parms); PyMem_Free(str); Py_XDECREF(query_str_obj); Py_XDECREF(param_obj); return PyErr_NoMemory(); } /* convert optional args to a list of strings -- this allows * the caller to pass whatever they like, and prevents us * from having to map types to OIDs */ for (i = 0, s = str, p = parms; i < nparms; ++i, ++p) { PyObject *obj = PySequence_Fast_GET_ITEM(param_obj, i); if (obj == Py_None) { *p = NULL; } else if (PyBytes_Check(obj)) { *p = PyBytes_AsString(obj); } else if (PyUnicode_Check(obj)) { PyObject *str_obj = get_encoded_string(obj, encoding); if (!str_obj) { PyMem_Free((void *) parms); while (s != str) { s--; Py_DECREF(*s); } PyMem_Free(str); Py_XDECREF(query_str_obj); Py_XDECREF(param_obj); /* pass the UnicodeEncodeError */ return NULL; } *s++ = str_obj; *p = PyBytes_AsString(str_obj); } else { PyObject *str_obj = PyObject_Str(obj); if (!str_obj) { PyMem_Free((void *) parms); while (s != str) { s--; Py_DECREF(*s); } PyMem_Free(str); Py_XDECREF(query_str_obj); Py_XDECREF(param_obj); PyErr_SetString( PyExc_TypeError, "Query parameter has no string representation"); return NULL; } *s++ = str_obj; *p = PyStr_AsString(str_obj); } } Py_BEGIN_ALLOW_THREADS result = prepared ? PQexecPrepared(self->cnx, query, nparms, parms, NULL, NULL, 0) : PQexecParams(self->cnx, query, nparms, NULL, parms, NULL, NULL, 0); Py_END_ALLOW_THREADS PyMem_Free((void *) parms); while (s != str) { s--; Py_DECREF(*s); } PyMem_Free(str); } else { Py_BEGIN_ALLOW_THREADS result = prepared ? 
PQexecPrepared(self->cnx, query, 0, NULL, NULL, NULL, 0) : PQexec(self->cnx, query); Py_END_ALLOW_THREADS } /* we don't need the query and its params any more */ Py_XDECREF(query_str_obj); Py_XDECREF(param_obj); /* checks result validity */ if (!result) { PyErr_SetString(PyExc_ValueError, PQerrorMessage(self->cnx)); return NULL; } /* this may have changed the datestyle, so we reset the date format in order to force fetching it newly when next time requested */ self->date_format = date_format; /* this is normally NULL */ /* checks result status */ if ((status = PQresultStatus(result)) != PGRES_TUPLES_OK) { switch (status) { case PGRES_EMPTY_QUERY: PyErr_SetString(PyExc_ValueError, "Empty query"); break; case PGRES_BAD_RESPONSE: case PGRES_FATAL_ERROR: case PGRES_NONFATAL_ERROR: set_error(ProgrammingError, "Cannot execute query", self->cnx, result); break; case PGRES_COMMAND_OK: { /* INSERT, UPDATE, DELETE */ Oid oid = PQoidValue(result); if (oid == InvalidOid) { /* not a single insert */ char *ret = PQcmdTuples(result); if (ret[0]) { /* return number of rows affected */ PyObject *obj = PyStr_FromString(ret); PQclear(result); return obj; } PQclear(result); Py_INCREF(Py_None); return Py_None; } /* for a single insert, return the oid */ PQclear(result); return PyInt_FromLong(oid); } case PGRES_COPY_OUT: /* no data will be received */ case PGRES_COPY_IN: PQclear(result); Py_INCREF(Py_None); return Py_None; default: set_error_msg(InternalError, "Unknown result status"); } PQclear(result); return NULL; /* error detected on query */ } if (!(query_obj = PyObject_New(queryObject, &queryType))) return PyErr_NoMemory(); /* stores result and returns object */ Py_XINCREF(self); query_obj->pgcnx = self; query_obj->result = result; query_obj->encoding = encoding; query_obj->current_row = 0; query_obj->max_row = PQntuples(result); query_obj->num_fields = PQnfields(result); query_obj->col_types = get_col_types(result, query_obj->num_fields); if (!query_obj->col_types) { Py_DECREF(query_obj); Py_DECREF(self); return NULL; } return (PyObject *) query_obj; } /* Database query */ static char conn_query__doc__[] = "query(sql, [arg]) -- create a new query object for this connection\n\n" "You must pass the SQL (string) request and you can optionally pass\n" "a tuple with positional parameters.\n"; static PyObject * conn_query(connObject *self, PyObject *args) { return _conn_query(self, args, 0); } /* Execute prepared statement. */ static char conn_query_prepared__doc__[] = "query_prepared(name, [arg]) -- execute a prepared statement\n\n" "You must pass the name (string) of the prepared statement and you can\n" "optionally pass a tuple with positional parameters.\n"; static PyObject * conn_query_prepared(connObject *self, PyObject *args) { return _conn_query(self, args, 1); } /* Create prepared statement. 
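A minimal usage sketch from the Python side (assumes an open classic connection "con"; the statement name and table are placeholders):

    con.prepare('fruit_by_id', 'SELECT * FROM fruits WHERE id = $1')
    q = con.query_prepared('fruit_by_id', (2,))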
*/ static char conn_prepare__doc__[] = "prepare(name, sql) -- create a prepared statement\n\n" "You must pass the name (string) of the prepared statement and the\n" "SQL (string) request for later execution.\n"; static PyObject * conn_prepare(connObject *self, PyObject *args) { char *name, *query; Py_ssize_t name_length, query_length; PGresult *result; if (!self->cnx) { PyErr_SetString(PyExc_TypeError, "Connection is not valid"); return NULL; } /* reads args */ if (!PyArg_ParseTuple(args, "s#s#", &name, &name_length, &query, &query_length)) { PyErr_SetString(PyExc_TypeError, "Method prepare() takes two string arguments"); return NULL; } /* create prepared statement */ Py_BEGIN_ALLOW_THREADS result = PQprepare(self->cnx, name, query, 0, NULL); Py_END_ALLOW_THREADS if (result && PQresultStatus(result) == PGRES_COMMAND_OK) { PQclear(result); Py_INCREF(Py_None); return Py_None; /* success */ } set_error(ProgrammingError, "Cannot create prepared statement", self->cnx, result); if (result) PQclear(result); return NULL; /* error */ } /* Describe prepared statement. */ static char conn_describe_prepared__doc__[] = "describe_prepared(name) -- describe a prepared statement\n\n" "You must pass the name (string) of the prepared statement.\n"; static PyObject * conn_describe_prepared(connObject *self, PyObject *args) { char *name; Py_ssize_t name_length; PGresult *result; if (!self->cnx) { PyErr_SetString(PyExc_TypeError, "Connection is not valid"); return NULL; } /* reads args */ if (!PyArg_ParseTuple(args, "s#", &name, &name_length)) { PyErr_SetString(PyExc_TypeError, "Method describe_prepared() takes a string argument"); return NULL; } /* describe prepared statement */ Py_BEGIN_ALLOW_THREADS result = PQdescribePrepared(self->cnx, name); Py_END_ALLOW_THREADS if (result && PQresultStatus(result) == PGRES_COMMAND_OK) { queryObject *query_obj = PyObject_New(queryObject, &queryType); if (!query_obj) return PyErr_NoMemory(); Py_XINCREF(self); query_obj->pgcnx = self; query_obj->result = result; query_obj->encoding = PQclientEncoding(self->cnx); query_obj->current_row = 0; query_obj->max_row = PQntuples(result); query_obj->num_fields = PQnfields(result); query_obj->col_types = get_col_types(result, query_obj->num_fields); return (PyObject *) query_obj; } set_error(ProgrammingError, "Cannot describe prepared statement", self->cnx, result); if (result) PQclear(result); return NULL; /* error */ } #ifdef DIRECT_ACCESS static char conn_putline__doc__[] = "putline(line) -- send a line directly to the backend"; /* Direct access function: putline. */ static PyObject * conn_putline(connObject *self, PyObject *args) { char *line; Py_ssize_t line_length; if (!self->cnx) { PyErr_SetString(PyExc_TypeError, "Connection is not valid"); return NULL; } /* reads args */ if (!PyArg_ParseTuple(args, "s#", &line, &line_length)) { PyErr_SetString(PyExc_TypeError, "Method putline() takes a string argument"); return NULL; } /* sends line to backend */ if (PQputline(self->cnx, line)) { PyErr_SetString(PyExc_IOError, PQerrorMessage(self->cnx)); return NULL; } Py_INCREF(Py_None); return Py_None; } /* Direct access function: getline.
*/ static char conn_getline__doc__[] = "getline() -- get a line directly from the backend"; static PyObject * conn_getline(connObject *self, PyObject *noargs) { char line[MAX_BUFFER_SIZE]; PyObject *str = NULL; /* GCC */ if (!self->cnx) { PyErr_SetString(PyExc_TypeError, "Connection is not valid"); return NULL; } /* gets line */ switch (PQgetline(self->cnx, line, MAX_BUFFER_SIZE)) { case 0: str = PyStr_FromString(line); break; case 1: PyErr_SetString(PyExc_MemoryError, "Buffer overflow"); str = NULL; break; case EOF: Py_INCREF(Py_None); str = Py_None; break; } return str; } /* Direct access function: end copy. */ static char conn_endcopy__doc__[] = "endcopy() -- synchronize client and server"; static PyObject * conn_endcopy(connObject *self, PyObject *noargs) { if (!self->cnx) { PyErr_SetString(PyExc_TypeError, "Connection is not valid"); return NULL; } /* ends direct copy */ if (PQendcopy(self->cnx)) { PyErr_SetString(PyExc_IOError, PQerrorMessage(self->cnx)); return NULL; } Py_INCREF(Py_None); return Py_None; } #endif /* DIRECT_ACCESS */ /* Insert table */ static char conn_inserttable__doc__[] = "inserttable(table, data) -- insert list into table\n\n" "The fields in the list must be in the same order as in the table.\n"; static PyObject * conn_inserttable(connObject *self, PyObject *args) { PGresult *result; char *table, *buffer, *bufpt; int encoding; size_t bufsiz; PyObject *list, *sublist, *item; PyObject *(*getitem) (PyObject *, Py_ssize_t); PyObject *(*getsubitem) (PyObject *, Py_ssize_t); Py_ssize_t i, j, m, n; if (!self->cnx) { PyErr_SetString(PyExc_TypeError, "Connection is not valid"); return NULL; } /* gets arguments */ if (!PyArg_ParseTuple(args, "sO:inserttable", &table, &list)) { PyErr_SetString( PyExc_TypeError, "Method inserttable() expects a string and a list as arguments"); return NULL; } /* checks list type */ if (PyList_Check(list)) { m = PyList_Size(list); getitem = PyList_GetItem; } else if (PyTuple_Check(list)) { m = PyTuple_Size(list); getitem = PyTuple_GetItem; } else { PyErr_SetString( PyExc_TypeError, "Method inserttable() expects a list or a tuple" " as second argument"); return NULL; } /* allocate buffer */ if (!(buffer = PyMem_Malloc(MAX_BUFFER_SIZE))) return PyErr_NoMemory(); /* starts query */ sprintf(buffer, "copy %s from stdin", table); Py_BEGIN_ALLOW_THREADS result = PQexec(self->cnx, buffer); Py_END_ALLOW_THREADS if (!result) { PyMem_Free(buffer); PyErr_SetString(PyExc_ValueError, PQerrorMessage(self->cnx)); return NULL; } encoding = PQclientEncoding(self->cnx); PQclear(result); n = 0; /* not strictly necessary but avoids warning */ /* feed table */ for (i = 0; i < m; ++i) { sublist = getitem(list, i); if (PyTuple_Check(sublist)) { j = PyTuple_Size(sublist); getsubitem = PyTuple_GetItem; } else if (PyList_Check(sublist)) { j = PyList_Size(sublist); getsubitem = PyList_GetItem; } else { PyMem_Free(buffer); PyErr_SetString( PyExc_TypeError, "The second argument must contain a tuple or a list"); return NULL; } if (i) { if (j != n) { PyMem_Free(buffer); PyErr_SetString( PyExc_TypeError, "Arrays contained in second arg must have same size"); return NULL; } } else { n = j; /* never used before this assignment */ } /* builds insert line */ bufpt = buffer; bufsiz = MAX_BUFFER_SIZE - 1; for (j = 0; j < n; ++j) { if (j) { *bufpt++ = '\t'; --bufsiz; } item = getsubitem(sublist, j); /* convert item to string and append to buffer */ if (item == Py_None) { if (bufsiz > 2) { *bufpt++ = '\\'; *bufpt++ = 'N'; bufsiz -= 2; } else bufsiz = 0; } else if (PyBytes_Check(item)) { const char* t =
PyBytes_AsString(item); while (*t && bufsiz) { if (*t == '\\' || *t == '\t' || *t == '\n') { *bufpt++ = '\\'; --bufsiz; if (!bufsiz) break; } *bufpt++ = *t++; --bufsiz; } } else if (PyUnicode_Check(item)) { PyObject *s = get_encoded_string(item, encoding); if (!s) { PyMem_Free(buffer); return NULL; /* pass the UnicodeEncodeError */ } else { const char* t = PyBytes_AsString(s); while (*t && bufsiz) { if (*t == '\\' || *t == '\t' || *t == '\n') { *bufpt++ = '\\'; --bufsiz; if (!bufsiz) break; } *bufpt++ = *t++; --bufsiz; } Py_DECREF(s); } } else if (PyInt_Check(item) || PyLong_Check(item)) { PyObject* s = PyObject_Str(item); const char* t = PyStr_AsString(s); while (*t && bufsiz) { *bufpt++ = *t++; --bufsiz; } Py_DECREF(s); } else { PyObject* s = PyObject_Repr(item); const char* t = PyStr_AsString(s); while (*t && bufsiz) { if (*t == '\\' || *t == '\t' || *t == '\n') { *bufpt++ = '\\'; --bufsiz; if (!bufsiz) break; } *bufpt++ = *t++; --bufsiz; } Py_DECREF(s); } if (bufsiz <= 0) { PyMem_Free(buffer); return PyErr_NoMemory(); } } *bufpt++ = '\n'; *bufpt = '\0'; /* sends data */ if (PQputline(self->cnx, buffer)) { PyErr_SetString(PyExc_IOError, PQerrorMessage(self->cnx)); PQendcopy(self->cnx); PyMem_Free(buffer); return NULL; } } /* ends query */ if (PQputline(self->cnx, "\\.\n")) { PyErr_SetString(PyExc_IOError, PQerrorMessage(self->cnx)); PQendcopy(self->cnx); PyMem_Free(buffer); return NULL; } if (PQendcopy(self->cnx)) { PyErr_SetString(PyExc_IOError, PQerrorMessage(self->cnx)); PyMem_Free(buffer); return NULL; } PyMem_Free(buffer); /* no error : returns nothing */ Py_INCREF(Py_None); return Py_None; } /* Get transaction state. */ static char conn_transaction__doc__[] = "transaction() -- return the current transaction status"; static PyObject * conn_transaction(connObject *self, PyObject *noargs) { if (!self->cnx) { PyErr_SetString(PyExc_TypeError, "Connection is not valid"); return NULL; } return PyInt_FromLong(PQtransactionStatus(self->cnx)); } /* Get parameter setting. */ static char conn_parameter__doc__[] = "parameter(name) -- look up a current parameter setting"; static PyObject * conn_parameter(connObject *self, PyObject *args) { const char *name; if (!self->cnx) { PyErr_SetString(PyExc_TypeError, "Connection is not valid"); return NULL; } /* get query args */ if (!PyArg_ParseTuple(args, "s", &name)) { PyErr_SetString(PyExc_TypeError, "Method parameter() takes a string as argument"); return NULL; } name = PQparameterStatus(self->cnx, name); if (name) return PyStr_FromString(name); /* unknown parameter, return None */ Py_INCREF(Py_None); return Py_None; } /* Get current date format. 
*/ static char conn_date_format__doc__[] = "date_format() -- return the current date format"; static PyObject * conn_date_format(connObject *self, PyObject *noargs) { const char *fmt; if (!self->cnx) { PyErr_SetString(PyExc_TypeError, "Connection is not valid"); return NULL; } /* check if the date format is cached in the connection */ fmt = self->date_format; if (!fmt) { fmt = date_style_to_format(PQparameterStatus(self->cnx, "DateStyle")); self->date_format = fmt; /* cache the result */ } return PyStr_FromString(fmt); } #ifdef ESCAPING_FUNCS /* Escape literal */ static char conn_escape_literal__doc__[] = "escape_literal(str) -- escape a literal constant for use within SQL"; static PyObject * conn_escape_literal(connObject *self, PyObject *string) { PyObject *tmp_obj = NULL, /* auxiliary string object */ *to_obj; /* string object to return */ char *from, /* our string argument as encoded string */ *to; /* the result as encoded string */ Py_ssize_t from_length; /* length of string */ size_t to_length; /* length of result */ int encoding = -1; /* client encoding */ if (PyBytes_Check(string)) { PyBytes_AsStringAndSize(string, &from, &from_length); } else if (PyUnicode_Check(string)) { encoding = PQclientEncoding(self->cnx); tmp_obj = get_encoded_string(string, encoding); if (!tmp_obj) return NULL; /* pass the UnicodeEncodeError */ PyBytes_AsStringAndSize(tmp_obj, &from, &from_length); } else { PyErr_SetString( PyExc_TypeError, "Method escape_literal() expects a string as argument"); return NULL; } to = PQescapeLiteral(self->cnx, from, (size_t) from_length); to_length = strlen(to); Py_XDECREF(tmp_obj); if (encoding == -1) to_obj = PyBytes_FromStringAndSize(to, (Py_ssize_t) to_length); else to_obj = get_decoded_string(to, (Py_ssize_t) to_length, encoding); if (to) PQfreemem(to); return to_obj; } /* Escape identifier */ static char conn_escape_identifier__doc__[] = "escape_identifier(str) -- escape an identifier for use within SQL"; static PyObject * conn_escape_identifier(connObject *self, PyObject *string) { PyObject *tmp_obj = NULL, /* auxiliary string object */ *to_obj; /* string object to return */ char *from, /* our string argument as encoded string */ *to; /* the result as encoded string */ Py_ssize_t from_length; /* length of string */ size_t to_length; /* length of result */ int encoding = -1; /* client encoding */ if (PyBytes_Check(string)) { PyBytes_AsStringAndSize(string, &from, &from_length); } else if (PyUnicode_Check(string)) { encoding = PQclientEncoding(self->cnx); tmp_obj = get_encoded_string(string, encoding); if (!tmp_obj) return NULL; /* pass the UnicodeEncodeError */ PyBytes_AsStringAndSize(tmp_obj, &from, &from_length); } else { PyErr_SetString( PyExc_TypeError, "Method escape_identifier() expects a string as argument"); return NULL; } to = PQescapeIdentifier(self->cnx, from, (size_t) from_length); to_length = strlen(to); Py_XDECREF(tmp_obj); if (encoding == -1) to_obj = PyBytes_FromStringAndSize(to, (Py_ssize_t) to_length); else to_obj = get_decoded_string(to, (Py_ssize_t) to_length, encoding); if (to) PQfreemem(to); return to_obj; } #endif /* ESCAPING_FUNCS */ /* Escape string */ static char conn_escape_string__doc__[] = "escape_string(str) -- escape a string for use within SQL"; static PyObject * conn_escape_string(connObject *self, PyObject *string) { PyObject *tmp_obj = NULL, /* auxiliary string object */ *to_obj; /* string object to return */ char *from, /* our string argument as encoded string */ *to; /* the result as encoded string */ Py_ssize_t from_length; /* 
length of string */ size_t to_length; /* length of result */ int encoding = -1; /* client encoding */ if (PyBytes_Check(string)) { PyBytes_AsStringAndSize(string, &from, &from_length); } else if (PyUnicode_Check(string)) { encoding = PQclientEncoding(self->cnx); tmp_obj = get_encoded_string(string, encoding); if (!tmp_obj) return NULL; /* pass the UnicodeEncodeError */ PyBytes_AsStringAndSize(tmp_obj, &from, &from_length); } else { PyErr_SetString( PyExc_TypeError, "Method escape_string() expects a string as argument"); return NULL; } to_length = 2 * (size_t) from_length + 1; if ((Py_ssize_t) to_length < from_length) { /* overflow */ to_length = (size_t) from_length; from_length = (from_length - 1)/2; } to = (char *) PyMem_Malloc(to_length); to_length = PQescapeStringConn(self->cnx, to, from, (size_t) from_length, NULL); Py_XDECREF(tmp_obj); if (encoding == -1) to_obj = PyBytes_FromStringAndSize(to, (Py_ssize_t) to_length); else to_obj = get_decoded_string(to, (Py_ssize_t) to_length, encoding); PyMem_Free(to); return to_obj; } /* Escape bytea */ static char conn_escape_bytea__doc__[] = "escape_bytea(data) -- escape binary data for use within SQL as type bytea"; static PyObject * conn_escape_bytea(connObject *self, PyObject *data) { PyObject *tmp_obj = NULL, /* auxiliary string object */ *to_obj; /* string object to return */ char *from, /* our string argument as encoded string */ *to; /* the result as encoded string */ Py_ssize_t from_length; /* length of string */ size_t to_length; /* length of result */ int encoding = -1; /* client encoding */ if (PyBytes_Check(data)) { PyBytes_AsStringAndSize(data, &from, &from_length); } else if (PyUnicode_Check(data)) { encoding = PQclientEncoding(self->cnx); tmp_obj = get_encoded_string(data, encoding); if (!tmp_obj) return NULL; /* pass the UnicodeEncodeError */ PyBytes_AsStringAndSize(tmp_obj, &from, &from_length); } else { PyErr_SetString( PyExc_TypeError, "Method escape_bytea() expects a string as argument"); return NULL; } to = (char *) PQescapeByteaConn(self->cnx, (unsigned char *) from, (size_t) from_length, &to_length); Py_XDECREF(tmp_obj); if (encoding == -1) to_obj = PyBytes_FromStringAndSize(to, (Py_ssize_t) to_length - 1); else to_obj = get_decoded_string(to, (Py_ssize_t) to_length - 1, encoding); if (to) PQfreemem(to); return to_obj; } #ifdef LARGE_OBJECTS /* Constructor for large objects (internal use only) */ static largeObject * large_new(connObject *pgcnx, Oid oid) { largeObject *large_obj; if (!(large_obj = PyObject_New(largeObject, &largeType))) { return NULL; } Py_XINCREF(pgcnx); large_obj->pgcnx = pgcnx; large_obj->lo_fd = -1; large_obj->lo_oid = oid; return large_obj; } /* Create large object. */ static char conn_locreate__doc__[] = "locreate(mode) -- create a new large object in the database"; static PyObject * conn_locreate(connObject *self, PyObject *args) { int mode; Oid lo_oid; /* checks validity */ if (!_check_cnx_obj(self)) { return NULL; } /* gets arguments */ if (!PyArg_ParseTuple(args, "i", &mode)) { PyErr_SetString(PyExc_TypeError, "Method locreate() takes an integer argument"); return NULL; } /* creates large object */ lo_oid = lo_creat(self->cnx, mode); if (lo_oid == 0) { set_error_msg(OperationalError, "Can't create large object"); return NULL; } return (PyObject *) large_new(self, lo_oid); } /* Init from already known oid. 
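
   For example, seen from Python (an illustrative sketch; the oid is
   hypothetical, and large object I/O must run inside a transaction):

       con.query("begin")
       lo = con.getlo(138416)     # wrap an existing large object
       lo.open(pg.INV_READ)
       data = lo.read(lo.size())
       lo.close()
       con.query("end")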
*/

static char conn_getlo__doc__[] =
"getlo(oid) -- create a large object instance for the specified oid";

static PyObject *
conn_getlo(connObject *self, PyObject *args)
{
    int oid;
    Oid lo_oid;

    /* checks validity */
    if (!_check_cnx_obj(self)) {
        return NULL;
    }

    /* gets arguments */
    if (!PyArg_ParseTuple(args, "i", &oid)) {
        PyErr_SetString(PyExc_TypeError,
                        "Method getlo() takes an integer argument");
        return NULL;
    }

    lo_oid = (Oid) oid;
    if (lo_oid == 0) {
        PyErr_SetString(PyExc_ValueError, "The object oid can't be null");
        return NULL;
    }

    /* creates object */
    return (PyObject *) large_new(self, lo_oid);
}

/* Import unix file. */
static char conn_loimport__doc__[] =
"loimport(name) -- create a new large object from specified file";

static PyObject *
conn_loimport(connObject *self, PyObject *args)
{
    char *name;
    Oid lo_oid;

    /* checks validity */
    if (!_check_cnx_obj(self)) {
        return NULL;
    }

    /* gets arguments */
    if (!PyArg_ParseTuple(args, "s", &name)) {
        PyErr_SetString(PyExc_TypeError,
                        "Method loimport() takes a string argument");
        return NULL;
    }

    /* imports file and checks result */
    lo_oid = lo_import(self->cnx, name);
    if (lo_oid == 0) {
        set_error_msg(OperationalError, "Can't create large object");
        return NULL;
    }

    return (PyObject *) large_new(self, lo_oid);
}
#endif /* LARGE_OBJECTS */

/* Reset connection. */
static char conn_reset__doc__[] =
"reset() -- reset connection with current parameters\n\n"
"All derived queries and large objects derived from this connection\n"
"will not be usable after this call.\n";

static PyObject *
conn_reset(connObject *self, PyObject *noargs)
{
    if (!self->cnx) {
        PyErr_SetString(PyExc_TypeError, "Connection is not valid");
        return NULL;
    }

    /* resets the connection */
    PQreset(self->cnx);

    Py_INCREF(Py_None);
    return Py_None;
}

/* Cancel current command. */
static char conn_cancel__doc__[] =
"cancel() -- abandon processing of the current command";

static PyObject *
conn_cancel(connObject *self, PyObject *noargs)
{
    if (!self->cnx) {
        PyErr_SetString(PyExc_TypeError, "Connection is not valid");
        return NULL;
    }

    /* request that the server abandon processing of the current command */
    return PyInt_FromLong((long) PQrequestCancel(self->cnx));
}

/* Get connection socket. */
static char conn_fileno__doc__[] =
"fileno() -- return database connection socket file handle";

static PyObject *
conn_fileno(connObject *self, PyObject *noargs)
{
    if (!self->cnx) {
        PyErr_SetString(PyExc_TypeError, "Connection is not valid");
        return NULL;
    }

    return PyInt_FromLong((long) PQsocket(self->cnx));
}

/* Set external typecast callback function. */
static char conn_set_cast_hook__doc__[] =
"set_cast_hook(func) -- set a fallback typecast function";

static PyObject *
conn_set_cast_hook(connObject *self, PyObject *func)
{
    PyObject *ret = NULL;

    if (func == Py_None) {
        Py_XDECREF(self->cast_hook);
        self->cast_hook = NULL;
        Py_INCREF(Py_None);
        ret = Py_None;
    }
    else if (PyCallable_Check(func)) {
        Py_XINCREF(func);
        Py_XDECREF(self->cast_hook);
        self->cast_hook = func;
        Py_INCREF(Py_None);
        ret = Py_None;
    }
    else {
        PyErr_SetString(PyExc_TypeError,
                        "Method set_cast_hook() expects"
                        " a callable or None as argument");
    }

    return ret;
}

/* Get external typecast callback function. */
static char conn_get_cast_hook__doc__[] =
"get_cast_hook() -- get the fallback typecast function";

static PyObject *
conn_get_cast_hook(connObject *self, PyObject *noargs)
{
    PyObject *ret = self->cast_hook;

    if (!ret)
        ret = Py_None;
    Py_INCREF(ret);

    return ret;
}

/* Set notice receiver callback function.
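
   For example, seen from Python (an illustrative sketch):

       con.set_notice_receiver(lambda notice: print(notice.message))
       con.query("drop table if exists no_such_table")  # notice is printed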
*/ static char conn_set_notice_receiver__doc__[] = "set_notice_receiver(func) -- set the current notice receiver"; static PyObject * conn_set_notice_receiver(connObject *self, PyObject *func) { PyObject *ret = NULL; if (func == Py_None) { Py_XDECREF(self->notice_receiver); self->notice_receiver = NULL; Py_INCREF(Py_None); ret = Py_None; } else if (PyCallable_Check(func)) { Py_XINCREF(func); Py_XDECREF(self->notice_receiver); self->notice_receiver = func; PQsetNoticeReceiver(self->cnx, notice_receiver, self); Py_INCREF(Py_None); ret = Py_None; } else { PyErr_SetString(PyExc_TypeError, "Method set_notice_receiver() expects" " a callable or None as argument"); } return ret; } /* Get notice receiver callback function. */ static char conn_get_notice_receiver__doc__[] = "get_notice_receiver() -- get the current notice receiver"; static PyObject * conn_get_notice_receiver(connObject *self, PyObject *noargs) { PyObject *ret = self->notice_receiver; if (!ret) ret = Py_None; Py_INCREF(ret); return ret; } /* Close without deleting. */ static char conn_close__doc__[] = "close() -- close connection\n\n" "All instances of the connection object and derived objects\n" "(queries and large objects) can no longer be used after this call.\n"; static PyObject * conn_close(connObject *self, PyObject *noargs) { /* connection object cannot already be closed */ if (!self->cnx) { set_error_msg(InternalError, "Connection already closed"); return NULL; } Py_BEGIN_ALLOW_THREADS PQfinish(self->cnx); Py_END_ALLOW_THREADS self->cnx = NULL; Py_INCREF(Py_None); return Py_None; } /* Get asynchronous notify. */ static char conn_get_notify__doc__[] = "getnotify() -- get database notify for this connection"; static PyObject * conn_get_notify(connObject *self, PyObject *noargs) { PGnotify *notify; if (!self->cnx) { PyErr_SetString(PyExc_TypeError, "Connection is not valid"); return NULL; } /* checks for NOTIFY messages */ PQconsumeInput(self->cnx); if (!(notify = PQnotifies(self->cnx))) { Py_INCREF(Py_None); return Py_None; } else { PyObject *notify_result, *tmp; if (!(tmp = PyStr_FromString(notify->relname))) { return NULL; } if (!(notify_result = PyTuple_New(3))) { return NULL; } PyTuple_SET_ITEM(notify_result, 0, tmp); if (!(tmp = PyInt_FromLong(notify->be_pid))) { Py_DECREF(notify_result); return NULL; } PyTuple_SET_ITEM(notify_result, 1, tmp); /* extra exists even in old versions that did not support it */ if (!(tmp = PyStr_FromString(notify->extra))) { Py_DECREF(notify_result); return NULL; } PyTuple_SET_ITEM(notify_result, 2, tmp); PQfreemem(notify); return notify_result; } } /* Get the list of connection attributes. 
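
   These are the read-only attributes handled by conn_getattr() and exposed
   to Python, e.g. (illustrative values):

       con.host, con.port, con.backend_pid   # ('localhost', 5432, 4711)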
*/ static PyObject * conn_dir(connObject *self, PyObject *noargs) { PyObject *attrs; attrs = PyObject_Dir(PyObject_Type((PyObject *) self)); PyObject_CallMethod( attrs, "extend", "[sssssssssssss]", "host", "port", "db", "options", "error", "status", "user", "protocol_version", "server_version", "socket", "backend_pid", "ssl_in_use", "ssl_attributes"); return attrs; } /* Connection object methods */ static struct PyMethodDef conn_methods[] = { {"__dir__", (PyCFunction) conn_dir, METH_NOARGS, NULL}, {"source", (PyCFunction) conn_source, METH_NOARGS, conn_source__doc__}, {"query", (PyCFunction) conn_query, METH_VARARGS, conn_query__doc__}, {"query_prepared", (PyCFunction) conn_query_prepared, METH_VARARGS, conn_query_prepared__doc__}, {"prepare", (PyCFunction) conn_prepare, METH_VARARGS, conn_prepare__doc__}, {"describe_prepared", (PyCFunction) conn_describe_prepared, METH_VARARGS, conn_describe_prepared__doc__}, {"reset", (PyCFunction) conn_reset, METH_NOARGS, conn_reset__doc__}, {"cancel", (PyCFunction) conn_cancel, METH_NOARGS, conn_cancel__doc__}, {"close", (PyCFunction) conn_close, METH_NOARGS, conn_close__doc__}, {"fileno", (PyCFunction) conn_fileno, METH_NOARGS, conn_fileno__doc__}, {"get_cast_hook", (PyCFunction) conn_get_cast_hook, METH_NOARGS, conn_get_cast_hook__doc__}, {"set_cast_hook", (PyCFunction) conn_set_cast_hook, METH_O, conn_set_cast_hook__doc__}, {"get_notice_receiver", (PyCFunction) conn_get_notice_receiver, METH_NOARGS, conn_get_notice_receiver__doc__}, {"set_notice_receiver", (PyCFunction) conn_set_notice_receiver, METH_O, conn_set_notice_receiver__doc__}, {"getnotify", (PyCFunction) conn_get_notify, METH_NOARGS, conn_get_notify__doc__}, {"inserttable", (PyCFunction) conn_inserttable, METH_VARARGS, conn_inserttable__doc__}, {"transaction", (PyCFunction) conn_transaction, METH_NOARGS, conn_transaction__doc__}, {"parameter", (PyCFunction) conn_parameter, METH_VARARGS, conn_parameter__doc__}, {"date_format", (PyCFunction) conn_date_format, METH_NOARGS, conn_date_format__doc__}, #ifdef ESCAPING_FUNCS {"escape_literal", (PyCFunction) conn_escape_literal, METH_O, conn_escape_literal__doc__}, {"escape_identifier", (PyCFunction) conn_escape_identifier, METH_O, conn_escape_identifier__doc__}, #endif /* ESCAPING_FUNCS */ {"escape_string", (PyCFunction) conn_escape_string, METH_O, conn_escape_string__doc__}, {"escape_bytea", (PyCFunction) conn_escape_bytea, METH_O, conn_escape_bytea__doc__}, #ifdef DIRECT_ACCESS {"putline", (PyCFunction) conn_putline, METH_VARARGS, conn_putline__doc__}, {"getline", (PyCFunction) conn_getline, METH_NOARGS, conn_getline__doc__}, {"endcopy", (PyCFunction) conn_endcopy, METH_NOARGS, conn_endcopy__doc__}, #endif /* DIRECT_ACCESS */ #ifdef LARGE_OBJECTS {"locreate", (PyCFunction) conn_locreate, METH_VARARGS, conn_locreate__doc__}, {"getlo", (PyCFunction) conn_getlo, METH_VARARGS, conn_getlo__doc__}, {"loimport", (PyCFunction) conn_loimport, METH_VARARGS, conn_loimport__doc__}, #endif /* LARGE_OBJECTS */ {NULL, NULL} /* sentinel */ }; static char conn__doc__[] = "PostgreSQL connection object"; /* Connection type definition */ static PyTypeObject connType = { PyVarObject_HEAD_INIT(NULL, 0) "pg.Connection", /* tp_name */ sizeof(connObject), /* tp_basicsize */ 0, /* tp_itemsize */ (destructor) conn_dealloc, /* tp_dealloc */ 0, /* tp_print */ 0, /* tp_getattr */ 0, /* tp_setattr */ 0, /* tp_reserved */ 0, /* tp_repr */ 0, /* tp_as_number */ 0, /* tp_as_sequence */ 0, /* tp_as_mapping */ 0, /* tp_hash */ 0, /* tp_call */ 0, /* tp_str */ (getattrofunc) 
conn_getattr, /* tp_getattro */ 0, /* tp_setattro */ 0, /* tp_as_buffer */ Py_TPFLAGS_DEFAULT, /* tp_flags */ conn__doc__, /* tp_doc */ 0, /* tp_traverse */ 0, /* tp_clear */ 0, /* tp_richcompare */ 0, /* tp_weaklistoffset */ 0, /* tp_iter */ 0, /* tp_iternext */ conn_methods, /* tp_methods */ }; pygresql-5.1.2/pgdb.py000066400000000000000000001741171365010227600146700ustar00rootroot00000000000000#!/usr/bin/python # # PyGreSQL - a Python interface for the PostgreSQL database. # # This file contains the DB-API 2 compatible pgdb module. # # Copyright (c) 2020 by the PyGreSQL Development Team # # Please see the LICENSE.TXT file for specific restrictions. """pgdb - DB-API 2.0 compliant module for PyGreSQL. (c) 1999, Pascal Andre . See package documentation for further information on copyright. Inline documentation is sparse. See DB-API 2.0 specification for usage information: http://www.python.org/peps/pep-0249.html Basic usage: pgdb.connect(connect_string) # open a connection # connect_string = 'host:database:user:password:opt' # All parts are optional. You may also pass host through # password as keyword arguments. To pass a port, # pass it in the host keyword parameter: connection = pgdb.connect(host='localhost:5432') cursor = connection.cursor() # open a cursor cursor.execute(query[, params]) # Execute a query, binding params (a dictionary) if they are # passed. The binding syntax is the same as the % operator # for dictionaries, and no quoting is done. cursor.executemany(query, list of params) # Execute a query many times, binding each param dictionary # from the list. cursor.fetchone() # fetch one row, [value, value, ...] cursor.fetchall() # fetch all rows, [[value, value, ...], ...] cursor.fetchmany([size]) # returns size or cursor.arraysize number of rows, # [[value, value, ...], ...] from result set. # Default cursor.arraysize is 1. cursor.description # returns information about the columns # [(column_name, type_name, display_size, # internal_size, precision, scale, null_ok), ...] # Note that display_size, precision, scale and null_ok # are not implemented. cursor.rowcount # number of rows available in the result set # Available after a call to execute. 
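
    For example (an illustrative mini-session; names and values are made up):

        cursor.execute("select 'hello' as greeting")
        cursor.description[0].name  # 'greeting'
        cursor.rowcount             # 1
        cursor.fetchone()           # Row(greeting='hello')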
connection.commit() # commit transaction connection.rollback() # or rollback transaction cursor.close() # close the cursor connection.close() # close the connection """ from __future__ import print_function, division try: from _pg import * except ImportError: import os import sys # see https://docs.python.org/3/whatsnew/3.8.html#ctypes if os.name == 'nt' and sys.version_info >= (3, 8): for path in os.environ["PATH"].split(os.pathsep): if os.path.exists(os.path.join(path, 'libpq.dll')): with os.add_dll_directory(os.path.abspath(path)): from _pg import * break else: raise else: raise __version__ = version __all__ = [ 'Connection', 'Cursor', 'Date', 'Time', 'Timestamp', 'DateFromTicks', 'TimeFromTicks', 'TimestampFromTicks', 'Binary', 'Interval', 'Uuid', 'Hstore', 'Json', 'Literal', 'Type', 'STRING', 'BINARY', 'NUMBER', 'DATETIME', 'ROWID', 'BOOL', 'SMALLINT', 'INTEGER', 'LONG', 'FLOAT', 'NUMERIC', 'MONEY', 'DATE', 'TIME', 'TIMESTAMP', 'INTERVAL', 'UUID', 'HSTORE', 'JSON', 'ARRAY', 'RECORD', 'Error', 'Warning', 'InterfaceError', 'DatabaseError', 'DataError', 'OperationalError', 'IntegrityError', 'ProgrammingError', 'NotSupportedError', 'apilevel', 'connect', 'paramstyle', 'threadsafety', 'get_typecast', 'set_typecast', 'reset_typecast', 'version', '__version__'] from datetime import date, time, datetime, timedelta, tzinfo from time import localtime from decimal import Decimal from uuid import UUID as Uuid from math import isnan, isinf try: from collections.abc import Iterable except ImportError: # Python < 3.3 from collections import Iterable from collections import namedtuple from keyword import iskeyword from functools import partial from re import compile as regex from json import loads as jsondecode, dumps as jsonencode try: # noinspection PyUnresolvedReferences long except NameError: # Python >= 3.0 long = int try: # noinspection PyUnresolvedReferences unicode except NameError: # Python >= 3.0 unicode = str try: # noinspection PyUnresolvedReferences basestring except NameError: # Python >= 3.0 basestring = (str, bytes) try: from functools import lru_cache except ImportError: # Python < 3.2 from functools import update_wrapper try: from _thread import RLock except ImportError: class RLock: # for builds without threads def __enter__(self): pass def __exit__(self, exctype, excinst, exctb): pass def lru_cache(maxsize=128): """Simplified functools.lru_cache decorator for one argument.""" def decorator(function): sentinel = object() cache = {} get = cache.get lock = RLock() root = [] root_full = [root, False] root[:] = [root, root, None, None] if maxsize == 0: def wrapper(arg): res = function(arg) return res elif maxsize is None: def wrapper(arg): res = get(arg, sentinel) if res is not sentinel: return res res = function(arg) cache[arg] = res return res else: def wrapper(arg): with lock: link = get(arg) if link is not None: root = root_full[0] prev, next, _arg, res = link prev[1] = next next[0] = prev last = root[0] last[1] = root[0] = link link[0] = last link[1] = root return res res = function(arg) with lock: root, full = root_full if arg in cache: pass elif full: oldroot = root oldroot[2] = arg oldroot[3] = res root = root_full[0] = oldroot[1] oldarg = root[2] oldres = root[3] # keep reference root[2] = root[3] = None del cache[oldarg] cache[arg] = oldroot else: last = root[0] link = [last, root, arg, res] last[1] = root[0] = cache[arg] = link if len(cache) >= maxsize: root_full[1] = True return res wrapper.__wrapped__ = function return update_wrapper(wrapper, function) return decorator ### 
Module Constants # compliant with DB API 2.0 apilevel = '2.0' # module may be shared, but not connections threadsafety = 1 # this module use extended python format codes paramstyle = 'pyformat' # shortcut methods have been excluded from DB API 2 and # are not recommended by the DB SIG, but they can be handy shortcutmethods = 1 ### Internal Type Handling try: from inspect import signature except ImportError: # Python < 3.3 from inspect import getargspec def get_args(func): return getargspec(func).args else: def get_args(func): return list(signature(func).parameters) try: from datetime import timezone except ImportError: # Python < 3.2 class timezone(tzinfo): """Simple timezone implementation.""" def __init__(self, offset, name=None): self.offset = offset if not name: minutes = self.offset.days * 1440 + self.offset.seconds // 60 if minutes < 0: hours, minutes = divmod(-minutes, 60) hours = -hours else: hours, minutes = divmod(minutes, 60) name = 'UTC%+03d:%02d' % (hours, minutes) self.name = name def utcoffset(self, dt): return self.offset def tzname(self, dt): return self.name def dst(self, dt): return None timezone.utc = timezone(timedelta(0), 'UTC') _has_timezone = False else: _has_timezone = True # time zones used in Postgres timestamptz output _timezones = dict(CET='+0100', EET='+0200', EST='-0500', GMT='+0000', HST='-1000', MET='+0100', MST='-0700', UCT='+0000', UTC='+0000', WET='+0000') def _timezone_as_offset(tz): if tz.startswith(('+', '-')): if len(tz) < 5: return tz + '00' return tz.replace(':', '') return _timezones.get(tz, '+0000') def _get_timezone(tz): tz = _timezone_as_offset(tz) minutes = 60 * int(tz[1:3]) + int(tz[3:5]) if tz[0] == '-': minutes = -minutes return timezone(timedelta(minutes=minutes), tz) def decimal_type(decimal_type=None): """Get or set global type to be used for decimal values. Note that connections cache cast functions. To be sure a global change is picked up by a running connection, call con.type_cache.reset_typecast(). """ global Decimal if decimal_type is not None: Decimal = decimal_type set_typecast('numeric', decimal_type) return Decimal def cast_bool(value): """Cast boolean value in database format to bool.""" if value: return value[0] in ('t', 'T') def cast_money(value): """Cast money value in database format to Decimal.""" if value: value = value.replace('(', '-') return Decimal(''.join(c for c in value if c.isdigit() or c in '.-')) def cast_int2vector(value): """Cast an int2vector value.""" return [int(v) for v in value.split()] def cast_date(value, connection): """Cast a date value.""" # The output format depends on the server setting DateStyle. The default # setting ISO and the setting for German are actually unambiguous. The # order of days and months in the other two settings is however ambiguous, # so at least here we need to consult the setting to properly parse values. 
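    # For example (illustrative): with DateStyle 'ISO' a date arrives as
    # '2020-06-25' and connection.date_format() returns '%Y-%m-%d', while
    # with DateStyle 'German' the same date arrives as '25.06.2020' and
    # the format is '%d.%m.%Y'.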
if value == '-infinity': return date.min if value == 'infinity': return date.max value = value.split() if value[-1] == 'BC': return date.min value = value[0] if len(value) > 10: return date.max fmt = connection.date_format() return datetime.strptime(value, fmt).date() def cast_time(value): """Cast a time value.""" fmt = '%H:%M:%S.%f' if len(value) > 8 else '%H:%M:%S' return datetime.strptime(value, fmt).time() _re_timezone = regex('(.*)([+-].*)') def cast_timetz(value): """Cast a timetz value.""" tz = _re_timezone.match(value) if tz: value, tz = tz.groups() else: tz = '+0000' fmt = '%H:%M:%S.%f' if len(value) > 8 else '%H:%M:%S' if _has_timezone: value += _timezone_as_offset(tz) fmt += '%z' return datetime.strptime(value, fmt).timetz() return datetime.strptime(value, fmt).timetz().replace( tzinfo=_get_timezone(tz)) def cast_timestamp(value, connection): """Cast a timestamp value.""" if value == '-infinity': return datetime.min if value == 'infinity': return datetime.max value = value.split() if value[-1] == 'BC': return datetime.min fmt = connection.date_format() if fmt.endswith('-%Y') and len(value) > 2: value = value[1:5] if len(value[3]) > 4: return datetime.max fmt = ['%d %b' if fmt.startswith('%d') else '%b %d', '%H:%M:%S.%f' if len(value[2]) > 8 else '%H:%M:%S', '%Y'] else: if len(value[0]) > 10: return datetime.max fmt = [fmt, '%H:%M:%S.%f' if len(value[1]) > 8 else '%H:%M:%S'] return datetime.strptime(' '.join(value), ' '.join(fmt)) def cast_timestamptz(value, connection): """Cast a timestamptz value.""" if value == '-infinity': return datetime.min if value == 'infinity': return datetime.max value = value.split() if value[-1] == 'BC': return datetime.min fmt = connection.date_format() if fmt.endswith('-%Y') and len(value) > 2: value = value[1:] if len(value[3]) > 4: return datetime.max fmt = ['%d %b' if fmt.startswith('%d') else '%b %d', '%H:%M:%S.%f' if len(value[2]) > 8 else '%H:%M:%S', '%Y'] value, tz = value[:-1], value[-1] else: if fmt.startswith('%Y-'): tz = _re_timezone.match(value[1]) if tz: value[1], tz = tz.groups() else: tz = '+0000' else: value, tz = value[:-1], value[-1] if len(value[0]) > 10: return datetime.max fmt = [fmt, '%H:%M:%S.%f' if len(value[1]) > 8 else '%H:%M:%S'] if _has_timezone: value.append(_timezone_as_offset(tz)) fmt.append('%z') return datetime.strptime(' '.join(value), ' '.join(fmt)) return datetime.strptime(' '.join(value), ' '.join(fmt)).replace( tzinfo=_get_timezone(tz)) _re_interval_sql_standard = regex( '(?:([+-])?([0-9]+)-([0-9]+) ?)?' '(?:([+-]?[0-9]+)(?!:) ?)?' '(?:([+-])?([0-9]+):([0-9]+):([0-9]+)(?:\\.([0-9]+))?)?') _re_interval_postgres = regex( '(?:([+-]?[0-9]+) ?years? ?)?' '(?:([+-]?[0-9]+) ?mons? ?)?' '(?:([+-]?[0-9]+) ?days? ?)?' '(?:([+-])?([0-9]+):([0-9]+):([0-9]+)(?:\\.([0-9]+))?)?') _re_interval_postgres_verbose = regex( '@ ?(?:([+-]?[0-9]+) ?years? ?)?' '(?:([+-]?[0-9]+) ?mons? ?)?' '(?:([+-]?[0-9]+) ?days? ?)?' '(?:([+-]?[0-9]+) ?hours? ?)?' '(?:([+-]?[0-9]+) ?mins? ?)?' '(?:([+-])?([0-9]+)(?:\\.([0-9]+))? ?secs?)? ?(ago)?') _re_interval_iso_8601 = regex( 'P(?:([+-]?[0-9]+)Y)?' '(?:([+-]?[0-9]+)M)?' '(?:([+-]?[0-9]+)D)?' '(?:T(?:([+-]?[0-9]+)H)?' '(?:([+-]?[0-9]+)M)?' '(?:([+-])?([0-9]+)(?:\\.([0-9]+))?S)?)?') def cast_interval(value): """Cast an interval value.""" # The output format depends on the server setting IntervalStyle, but it's # not necessary to consult this setting to parse it. It's faster to just # check all possible formats, and there is no ambiguity here. 
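    # For example, one year and two months may arrive in any of these
    # forms, which are tried below in this order:
    #   iso_8601:         'P1Y2M'
    #   postgres_verbose: '@ 1 year 2 mons'
    #   postgres:         '1 year 2 mons'
    #   sql_standard:     '1-2'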
m = _re_interval_iso_8601.match(value) if m: m = [d or '0' for d in m.groups()] secs_ago = m.pop(5) == '-' m = [int(d) for d in m] years, mons, days, hours, mins, secs, usecs = m if secs_ago: secs = -secs usecs = -usecs else: m = _re_interval_postgres_verbose.match(value) if m: m, ago = [d or '0' for d in m.groups()[:8]], m.group(9) secs_ago = m.pop(5) == '-' m = [-int(d) for d in m] if ago else [int(d) for d in m] years, mons, days, hours, mins, secs, usecs = m if secs_ago: secs = - secs usecs = -usecs else: m = _re_interval_postgres.match(value) if m and any(m.groups()): m = [d or '0' for d in m.groups()] hours_ago = m.pop(3) == '-' m = [int(d) for d in m] years, mons, days, hours, mins, secs, usecs = m if hours_ago: hours = -hours mins = -mins secs = -secs usecs = -usecs else: m = _re_interval_sql_standard.match(value) if m and any(m.groups()): m = [d or '0' for d in m.groups()] years_ago = m.pop(0) == '-' hours_ago = m.pop(3) == '-' m = [int(d) for d in m] years, mons, days, hours, mins, secs, usecs = m if years_ago: years = -years mons = -mons if hours_ago: hours = -hours mins = -mins secs = -secs usecs = -usecs else: raise ValueError('Cannot parse interval: %s' % value) days += 365 * years + 30 * mons return timedelta(days=days, hours=hours, minutes=mins, seconds=secs, microseconds=usecs) class Typecasts(dict): """Dictionary mapping database types to typecast functions. The cast functions get passed the string representation of a value in the database which they need to convert to a Python object. The passed string will never be None since NULL values are already handled before the cast function is called. """ # the default cast functions # (str functions are ignored but have been added for faster access) defaults = {'char': str, 'bpchar': str, 'name': str, 'text': str, 'varchar': str, 'bool': cast_bool, 'bytea': unescape_bytea, 'int2': int, 'int4': int, 'serial': int, 'int8': long, 'oid': int, 'hstore': cast_hstore, 'json': jsondecode, 'jsonb': jsondecode, 'float4': float, 'float8': float, 'numeric': Decimal, 'money': cast_money, 'date': cast_date, 'interval': cast_interval, 'time': cast_time, 'timetz': cast_timetz, 'timestamp': cast_timestamp, 'timestamptz': cast_timestamptz, 'int2vector': cast_int2vector, 'uuid': Uuid, 'anyarray': cast_array, 'record': cast_record} connection = None # will be set in local connection specific instances def __missing__(self, typ): """Create a cast function if it is not cached. Note that this class never raises a KeyError, but returns None when no special cast function exists. 
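
        For example (an illustrative sketch), looking up the array type
        '_int4' builds an array cast on top of the 'int4' cast:

            casts = Typecasts()
            casts['int4']('42')        # 42
            casts['_int4']('{1,2,3}')  # [1, 2, 3]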
""" if not isinstance(typ, str): raise TypeError('Invalid type: %s' % typ) cast = self.defaults.get(typ) if cast: # store default for faster access cast = self._add_connection(cast) self[typ] = cast elif typ.startswith('_'): # create array cast base_cast = self[typ[1:]] cast = self.create_array_cast(base_cast) if base_cast: # store only if base type exists self[typ] = cast return cast @staticmethod def _needs_connection(func): """Check if a typecast function needs a connection argument.""" try: args = get_args(func) except (TypeError, ValueError): return False else: return 'connection' in args[1:] def _add_connection(self, cast): """Add a connection argument to the typecast function if necessary.""" if not self.connection or not self._needs_connection(cast): return cast return partial(cast, connection=self.connection) def get(self, typ, default=None): """Get the typecast function for the given database type.""" return self[typ] or default def set(self, typ, cast): """Set a typecast function for the specified database type(s).""" if isinstance(typ, basestring): typ = [typ] if cast is None: for t in typ: self.pop(t, None) self.pop('_%s' % t, None) else: if not callable(cast): raise TypeError("Cast parameter must be callable") for t in typ: self[t] = self._add_connection(cast) self.pop('_%s' % t, None) def reset(self, typ=None): """Reset the typecasts for the specified type(s) to their defaults. When no type is specified, all typecasts will be reset. """ defaults = self.defaults if typ is None: self.clear() self.update(defaults) else: if isinstance(typ, basestring): typ = [typ] for t in typ: cast = defaults.get(t) if cast: self[t] = self._add_connection(cast) t = '_%s' % t cast = defaults.get(t) if cast: self[t] = self._add_connection(cast) else: self.pop(t, None) else: self.pop(t, None) self.pop('_%s' % t, None) def create_array_cast(self, basecast): """Create an array typecast for the given base cast.""" cast_array = self['anyarray'] def cast(v): return cast_array(v, basecast) return cast def create_record_cast(self, name, fields, casts): """Create a named record typecast for the given fields and casts.""" cast_record = self['record'] record = namedtuple(name, fields) def cast(v): return record(*cast_record(v, casts)) return cast _typecasts = Typecasts() # this is the global typecast dictionary def get_typecast(typ): """Get the global typecast function for the given database type(s).""" return _typecasts.get(typ) def set_typecast(typ, cast): """Set a global typecast function for the given database type(s). Note that connections cache cast functions. To be sure a global change is picked up by a running connection, call con.type_cache.reset_typecast(). """ _typecasts.set(typ, cast) def reset_typecast(typ=None): """Reset the global typecasts for the given type(s) to their default. When no type is specified, all typecasts will be reset. Note that connections cache cast functions. To be sure a global change is picked up by a running connection, call con.type_cache.reset_typecast(). 
""" _typecasts.reset(typ) class LocalTypecasts(Typecasts): """Map typecasts, including local composite types, to cast functions.""" defaults = _typecasts connection = None # will be set in a connection specific instance def __missing__(self, typ): """Create a cast function if it is not cached.""" if typ.startswith('_'): base_cast = self[typ[1:]] cast = self.create_array_cast(base_cast) if base_cast: self[typ] = cast else: cast = self.defaults.get(typ) if cast: cast = self._add_connection(cast) self[typ] = cast else: fields = self.get_fields(typ) if fields: casts = [self[field.type] for field in fields] fields = [field.name for field in fields] cast = self.create_record_cast(typ, fields, casts) self[typ] = cast return cast def get_fields(self, typ): """Return the fields for the given record type. This method will be replaced with a method that looks up the fields using the type cache of the connection. """ return [] class TypeCode(str): """Class representing the type_code used by the DB-API 2.0. TypeCode objects are strings equal to the PostgreSQL type name, but carry some additional information. """ @classmethod def create(cls, oid, name, len, type, category, delim, relid): """Create a type code for a PostgreSQL data type.""" self = cls(name) self.oid = oid self.len = len self.type = type self.category = category self.delim = delim self.relid = relid return self FieldInfo = namedtuple('FieldInfo', ['name', 'type']) class TypeCache(dict): """Cache for database types. This cache maps type OIDs and names to TypeCode strings containing important information on the associated database type. """ def __init__(self, cnx): """Initialize type cache for connection.""" super(TypeCache, self).__init__() self._escape_string = cnx.escape_string self._src = cnx.source() self._typecasts = LocalTypecasts() self._typecasts.get_fields = self.get_fields self._typecasts.connection = cnx if cnx.server_version < 80400: # older remote databases (not officially supported) self._query_pg_type = ("SELECT oid, typname," " typlen, typtype, null as typcategory, typdelim, typrelid" " FROM pg_catalog.pg_type WHERE oid OPERATOR(pg_catalog.=) %s") else: self._query_pg_type = ("SELECT oid, typname," " typlen, typtype, typcategory, typdelim, typrelid" " FROM pg_catalog.pg_type WHERE oid OPERATOR(pg_catalog.=) %s") def __missing__(self, key): """Get the type info from the database if it is not cached.""" if isinstance(key, int): oid = key else: if '.' 
not in key and '"' not in key: key = '"%s"' % (key,) oid = "'%s'::regtype" % (self._escape_string(key),) try: self._src.execute(self._query_pg_type % (oid,)) except ProgrammingError: res = None else: res = self._src.fetch(1) if not res: raise KeyError('Type %s could not be found' % (key,)) res = res[0] type_code = TypeCode.create(int(res[0]), res[1], int(res[2]), res[3], res[4], res[5], int(res[6])) self[type_code.oid] = self[str(type_code)] = type_code return type_code def get(self, key, default=None): """Get the type even if it is not cached.""" try: return self[key] except KeyError: return default def get_fields(self, typ): """Get the names and types of the fields of composite types.""" if not isinstance(typ, TypeCode): typ = self.get(typ) if not typ: return None if not typ.relid: return None # this type is not composite self._src.execute("SELECT attname, atttypid" " FROM pg_catalog.pg_attribute" " WHERE attrelid OPERATOR(pg_catalog.=) %s" " AND attnum OPERATOR(pg_catalog.>) 0" " AND NOT attisdropped ORDER BY attnum" % (typ.relid,)) return [FieldInfo(name, self.get(int(oid))) for name, oid in self._src.fetch(-1)] def get_typecast(self, typ): """Get the typecast function for the given database type.""" return self._typecasts.get(typ) def set_typecast(self, typ, cast): """Set a typecast function for the specified database type(s).""" self._typecasts.set(typ, cast) def reset_typecast(self, typ=None): """Reset the typecast function for the specified database type(s).""" self._typecasts.reset(typ) def typecast(self, value, typ): """Cast the given value according to the given database type.""" if value is None: # for NULL values, no typecast is necessary return None cast = self.get_typecast(typ) if not cast or cast is str: # no typecast is necessary return value return cast(value) class _quotedict(dict): """Dictionary with auto quoting of its items. The quote attribute must be set to the desired quote function. """ def __getitem__(self, key): return self.quote(super(_quotedict, self).__getitem__(key)) ### Error Messages def _db_error(msg, cls=DatabaseError): """Return DatabaseError with empty sqlstate attribute.""" error = cls(msg) error.sqlstate = None return error def _op_error(msg): """Return OperationalError.""" return _db_error(msg, OperationalError) ### Row Tuples _re_fieldname = regex('^[A-Za-z][_a-zA-Z0-9]*$') # The result rows for database operations are returned as named tuples # by default. Since creating namedtuple classes is a somewhat expensive # operation, we cache up to 1024 of these classes by default. @lru_cache(maxsize=1024) def _row_factory(names): """Get a namedtuple factory for row results with the given names.""" try: try: return namedtuple('Row', names, rename=True)._make except TypeError: # Python 2.6 and 3.0 do not support rename names = [v if _re_fieldname.match(v) and not iskeyword(v) else 'column_%d' % (n,) for n, v in enumerate(names)] return namedtuple('Row', names)._make except ValueError: # there is still a problem with the field names names = ['column_%d' % (n,) for n in range(len(names))] return namedtuple('Row', names)._make def set_row_factory_size(maxsize): """Change the size of the namedtuple factory cache. If maxsize is set to None, the cache can grow without bound. 
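
    For example (illustrative):

        set_row_factory_size(None)  # cache one namedtuple class per
                                    # distinct set of column names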
""" global _row_factory _row_factory = lru_cache(maxsize)(_row_factory.__wrapped__) ### Cursor Object class Cursor(object): """Cursor object.""" def __init__(self, dbcnx): """Create a cursor object for the database connection.""" self.connection = self._dbcnx = dbcnx self._cnx = dbcnx._cnx self.type_cache = dbcnx.type_cache self._src = self._cnx.source() # the official attribute for describing the result columns self._description = None if self.row_factory is Cursor.row_factory: # the row factory needs to be determined dynamically self.row_factory = None else: self.build_row_factory = None self.rowcount = -1 self.arraysize = 1 self.lastrowid = None def __iter__(self): """Make cursor compatible to the iteration protocol.""" return self def __enter__(self): """Enter the runtime context for the cursor object.""" return self def __exit__(self, et, ev, tb): """Exit the runtime context for the cursor object.""" self.close() def _quote(self, value): """Quote value depending on its type.""" if value is None: return 'NULL' if isinstance(value, (Hstore, Json)): value = str(value) if isinstance(value, basestring): if isinstance(value, Binary): value = self._cnx.escape_bytea(value) if bytes is not str: # Python >= 3.0 value = value.decode('ascii') else: value = self._cnx.escape_string(value) return "'%s'" % (value,) if isinstance(value, float): if isinf(value): return "'-Infinity'" if value < 0 else "'Infinity'" if isnan(value): return "'NaN'" return value if isinstance(value, (int, long, Decimal, Literal)): return value if isinstance(value, datetime): if value.tzinfo: return "'%s'::timestamptz" % (value,) return "'%s'::timestamp" % (value,) if isinstance(value, date): return "'%s'::date" % (value,) if isinstance(value, time): if value.tzinfo: return "'%s'::timetz" % (value,) return "'%s'::time" % value if isinstance(value, timedelta): return "'%s'::interval" % (value,) if isinstance(value, Uuid): return "'%s'::uuid" % (value,) if isinstance(value, list): # Quote value as an ARRAY constructor. This is better than using # an array literal because it carries the information that this is # an array and not a string. One issue with this syntax is that # you need to add an explicit typecast when passing empty arrays. # The ARRAY keyword is actually only necessary at the top level. if not value: # exception for empty array return "'{}'" q = self._quote try: return 'ARRAY[%s]' % (','.join(str(q(v)) for v in value),) except UnicodeEncodeError: # Python 2 with non-ascii values return u'ARRAY[%s]' % (','.join(unicode(q(v)) for v in value),) if isinstance(value, tuple): # Quote as a ROW constructor. This is better than using a record # literal because it carries the information that this is a record # and not a string. We don't use the keyword ROW in order to make # this usable with the IN syntax as well. It is only necessary # when the records has a single column which is not really useful. q = self._quote try: return '(%s)' % (','.join(str(q(v)) for v in value),) except UnicodeEncodeError: # Python 2 with non-ascii values return u'(%s)' % (','.join(unicode(q(v)) for v in value),) try: value = value.__pg_repr__() except AttributeError: raise InterfaceError( 'Do not know how to adapt type %s' % (type(value),)) if isinstance(value, (tuple, list)): value = self._quote(value) return value def _quoteparams(self, string, parameters): """Quote parameters. This function works for both mappings and sequences. 
        The function should be used even when there are no parameters,
        so that we have a consistent behavior regarding percent signs.
        """
        if not parameters:
            try:
                return string % ()  # unescape literal quotes if possible
            except (TypeError, ValueError):
                return string  # silently accept unescaped quotes
        if isinstance(parameters, dict):
            parameters = _quotedict(parameters)
            parameters.quote = self._quote
        else:
            parameters = tuple(map(self._quote, parameters))
        return string % parameters

    def _make_description(self, info):
        """Make the description tuple for the given field info."""
        name, typ, size, mod = info[1:]
        type_code = self.type_cache[typ]
        if mod > 0:
            mod -= 4
        if type_code == 'numeric':
            precision, scale = mod >> 16, mod & 0xffff
            size = precision
        else:
            if not size:
                # TypeCode stores the internal size in its len attribute
                size = type_code.len
            if size == -1:
                size = mod
            precision = scale = None
        return CursorDescription(name, type_code,
            None, size, precision, scale, None)

    @property
    def description(self):
        """Read-only attribute describing the result columns."""
        descr = self._description
        if self._description is True:
            make = self._make_description
            descr = [make(info) for info in self._src.listinfo()]
            self._description = descr
        return descr

    @property
    def colnames(self):
        """Unofficial convenience method for getting the column names."""
        return [d[0] for d in self.description]

    @property
    def coltypes(self):
        """Unofficial convenience method for getting the column types."""
        return [d[1] for d in self.description]

    def close(self):
        """Close the cursor object."""
        self._src.close()

    def execute(self, operation, parameters=None):
        """Prepare and execute a database operation (query or command)."""
        # The parameters may also be specified as list of tuples to e.g.
        # insert multiple rows in a single operation, but this kind of
        # usage is deprecated. We make several plausibility checks because
        # tuples can also be passed with the meaning of ROW constructors.
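        # For example (pyformat parameter binding; names are illustrative):
        #     cursor.execute("insert into t (a, b) values (%s, %s)",
        #                    (1, 'two'))
        #     cursor.execute("select * from t where a = %(a)s", {'a': 1})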
if (parameters and isinstance(parameters, list) and len(parameters) > 1 and all(isinstance(p, tuple) for p in parameters) and all(len(p) == len(parameters[0]) for p in parameters[1:])): return self.executemany(operation, parameters) else: # not a list of tuples return self.executemany(operation, [parameters]) def executemany(self, operation, seq_of_parameters): """Prepare operation and execute it against a parameter sequence.""" if not seq_of_parameters: # don't do anything without parameters return self._description = None self.rowcount = -1 # first try to execute all queries rowcount = 0 sql = "BEGIN" try: if not self._dbcnx._tnx and not self._dbcnx.autocommit: try: self._src.execute(sql) except DatabaseError: raise # database provides error message except Exception: raise _op_error("Can't start transaction") else: self._dbcnx._tnx = True for parameters in seq_of_parameters: sql = operation sql = self._quoteparams(sql, parameters) rows = self._src.execute(sql) if rows: # true if not DML rowcount += rows else: self.rowcount = -1 except DatabaseError: raise # database provides error message except Error as err: raise _db_error( "Error in '%s': '%s' " % (sql, err), InterfaceError) except Exception as err: raise _op_error("Internal error in '%s': %s" % (sql, err)) # then initialize result raw count and description if self._src.resulttype == RESULT_DQL: self._description = True # fetch on demand self.rowcount = self._src.ntuples self.lastrowid = None if self.build_row_factory: self.row_factory = self.build_row_factory() else: self.rowcount = rowcount self.lastrowid = self._src.oidstatus() # return the cursor object, so you can write statements such as # "cursor.execute(...).fetchall()" or "for row in cursor.execute(...)" return self def fetchone(self): """Fetch the next row of a query result set.""" res = self.fetchmany(1, False) try: return res[0] except IndexError: return None def fetchall(self): """Fetch all (remaining) rows of a query result.""" return self.fetchmany(-1, False) def fetchmany(self, size=None, keep=False): """Fetch the next set of rows of a query result. The number of rows to fetch per call is specified by the size parameter. If it is not given, the cursor's arraysize determines the number of rows to be fetched. If you set the keep parameter to true, this is kept as new arraysize. """ if size is None: size = self.arraysize if keep: self.arraysize = size try: result = self._src.fetch(size) except DatabaseError: raise except Error as err: raise _db_error(str(err)) typecast = self.type_cache.typecast return [self.row_factory([typecast(value, typ) for typ, value in zip(self.coltypes, row)]) for row in result] def callproc(self, procname, parameters=None): """Call a stored database procedure with the given name. The sequence of parameters must contain one entry for each input argument that the procedure expects. The result of the call is the same as this input sequence; replacement of output and input/output parameters in the return value is currently not supported. The procedure may also provide a result set as output. These can be requested through the standard fetch methods of the cursor. """ n = parameters and len(parameters) or 0 query = 'select * from "%s"(%s)' % (procname, ','.join(n * ['%s'])) self.execute(query, parameters) return parameters def copy_from(self, stream, table, format=None, sep=None, null=None, size=None, columns=None): """Copy data from an input stream to the specified table. 
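
        A minimal illustration (hypothetical table; the parameters are
        described below):

            import io
            cursor.copy_from(io.StringIO('1\tapple\n2\tpear\n'), 'fruits')
            cursor.copy_from(['3\tplum\n'], 'fruits')  # iterables work too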
        The input stream can be a file-like object with a read() method or
        it can also be an iterable returning a row or multiple rows of input
        on each iteration.

        The format must be text, csv or binary. The sep option sets the
        column separator (delimiter) used in the non binary formats.
        The null option sets the textual representation of NULL in the input.

        The size option sets the size of the buffer used when reading data
        from file-like objects.

        The copy operation can be restricted to a subset of columns. If no
        columns are specified, all of them will be copied.
        """
        binary_format = format == 'binary'
        try:
            read = stream.read
        except AttributeError:
            if size:
                raise ValueError(
                    "Size must only be set for file-like objects")
            if binary_format:
                input_type = bytes
                type_name = 'byte strings'
            else:
                input_type = basestring
                type_name = 'strings'
            if isinstance(stream, basestring):
                if not isinstance(stream, input_type):
                    raise ValueError("The input must be %s" % (type_name,))
                if not binary_format:
                    if isinstance(stream, str):
                        if not stream.endswith('\n'):
                            stream += '\n'
                    else:
                        if not stream.endswith(b'\n'):
                            stream += b'\n'

                def chunks():
                    yield stream

            elif isinstance(stream, Iterable):

                def chunks():
                    for chunk in stream:
                        if not isinstance(chunk, input_type):
                            raise ValueError(
                                "Input stream must consist of %s"
                                % (type_name,))
                        if isinstance(chunk, str):
                            if not chunk.endswith('\n'):
                                chunk += '\n'
                        else:
                            if not chunk.endswith(b'\n'):
                                chunk += b'\n'
                        yield chunk

            else:
                raise TypeError("Need an input stream to copy from")
        else:
            if size is None:
                size = 8192
            elif not isinstance(size, int):
                raise TypeError("The size option must be an integer")
            if size > 0:

                def chunks():
                    while True:
                        buffer = read(size)
                        yield buffer
                        if not buffer or len(buffer) < size:
                            break

            else:

                def chunks():
                    yield read()

        if not table or not isinstance(table, basestring):
            raise TypeError("Need a table to copy to")
        if table.lower().startswith('select'):
            raise ValueError("Must specify a table, not a query")
        else:
            table = '"%s"' % (table,)
        operation = ['copy %s' % (table,)]
        options = []
        params = []
        if format is not None:
            if not isinstance(format, basestring):
                raise TypeError("The format option must be a string")
            if format not in ('text', 'csv', 'binary'):
                raise ValueError("Invalid format")
            options.append('format %s' % (format,))
        if sep is not None:
            if not isinstance(sep, basestring):
                raise TypeError("The sep option must be a string")
            if format == 'binary':
                raise ValueError(
                    "The sep option is not allowed with binary format")
            if len(sep) != 1:
                raise ValueError(
                    "The sep option must be a single one-byte character")
            options.append('delimiter %s')
            params.append(sep)
        if null is not None:
            if not isinstance(null, basestring):
                raise TypeError("The null option must be a string")
            options.append('null %s')
            params.append(null)
        if columns:
            if not isinstance(columns, basestring):
                columns = ','.join('"%s"' % (col,) for col in columns)
            operation.append('(%s)' % (columns,))
        operation.append("from stdin")
        if options:
            operation.append('(%s)' % (','.join(options),))
        operation = ' '.join(operation)

        putdata = self._src.putdata
        self.execute(operation, params)

        try:
            for chunk in chunks():
                putdata(chunk)
        except BaseException as error:
            self.rowcount = -1
            # the following call will re-raise the error
            putdata(error)
        else:
            self.rowcount = putdata(None)

        # return the cursor object, so you can chain operations
        return self

    def copy_to(self, stream, table, format=None, sep=None, null=None,
                decode=None, columns=None):
        """Copy data from the specified table to an output stream.
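
        A minimal illustration (hypothetical table; the parameters are
        described below):

            rows = list(cursor.copy_to(None, 'fruits', decode=True))
            # e.g. ['1\tapple\n', '2\tpear\n']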
The output stream can be a file-like object with a write() method or it can also be None, in which case the method will return a generator yielding a row on each iteration. Output will be returned as byte strings unless you set decode to true. Note that you can also use a select query instead of the table name. The format must be text, csv or binary. The sep option sets the column separator (delimiter) used in the non binary formats. The null option sets the textual representation of NULL in the output. The copy operation can be restricted to a subset of columns. If no columns are specified, all of them will be copied. """ binary_format = format == 'binary' if stream is not None: try: write = stream.write except AttributeError: raise TypeError("Need an output stream to copy to") if not table or not isinstance(table, basestring): raise TypeError("Need a table to copy to") if table.lower().startswith('select'): if columns: raise ValueError("Columns must be specified in the query") table = '(%s)' % (table,) else: table = '"%s"' % (table,) operation = ['copy %s' % (table,)] options = [] params = [] if format is not None: if not isinstance(format, basestring): raise TypeError("The format option must be a string") if format not in ('text', 'csv', 'binary'): raise ValueError("Invalid format") options.append('format %s' % (format,)) if sep is not None: if not isinstance(sep, basestring): raise TypeError("The sep option must be a string") if binary_format: raise ValueError( "The sep option is not allowed with binary format") if len(sep) != 1: raise ValueError( "The sep option must be a single one-byte character") options.append('delimiter %s') params.append(sep) if null is not None: if not isinstance(null, basestring): raise TypeError("The null option must be a string") options.append('null %s') params.append(null) if decode is None: if format == 'binary': decode = False else: decode = str is unicode else: if not isinstance(decode, (int, bool)): raise TypeError("The decode option must be a boolean") if decode and binary_format: raise ValueError( "The decode option is not allowed with binary format") if columns: if not isinstance(columns, basestring): columns = ','.join('"%s"' % (col,) for col in columns) operation.append('(%s)' % (columns,)) operation.append("to stdout") if options: operation.append('(%s)' % (','.join(options),)) operation = ' '.join(operation) getdata = self._src.getdata self.execute(operation, params) def copy(): self.rowcount = 0 while True: row = getdata(decode) if isinstance(row, int): if self.rowcount != row: self.rowcount = row break self.rowcount += 1 yield row if stream is None: # no input stream, return the generator return copy() # write the rows to the file-like input stream for row in copy(): write(row) # return the cursor object, so you can chain operations return self def __next__(self): """Return the next row (support for the iteration protocol).""" res = self.fetchone() if res is None: raise StopIteration return res # Note that since Python 2.6 the iterator protocol uses __next()__ # instead of next(), we keep it only for backward compatibility of pgdb. next = __next__ @staticmethod def nextset(): """Not supported.""" raise NotSupportedError("The nextset() method is not supported") @staticmethod def setinputsizes(sizes): """Not supported.""" pass # unsupported, but silently passed @staticmethod def setoutputsize(size, column=0): """Not supported.""" pass # unsupported, but silently passed @staticmethod def row_factory(row): """Process rows before they are returned. 
You can overwrite this statically with a custom row factory, or you can build a row factory dynamically with build_row_factory(). For example, you can create a Cursor class that returns rows as Python dictionaries like this: class DictCursor(pgdb.Cursor): def row_factory(self, row): return {desc[0]: value for desc, value in zip(self.description, row)} cur = DictCursor(con) # get one DictCursor instance or con.cursor_type = DictCursor # always use DictCursor instances """ raise NotImplementedError def build_row_factory(self): """Build a row factory based on the current description. This implementation builds a row factory for creating named tuples. You can overwrite this method if you want to dynamically create different row factories whenever the column description changes. """ names = self.colnames if names: return _row_factory(tuple(names)) CursorDescription = namedtuple('CursorDescription', ['name', 'type_code', 'display_size', 'internal_size', 'precision', 'scale', 'null_ok']) ### Connection Objects class Connection(object): """Connection object.""" # expose the exceptions as attributes on the connection object Error = Error Warning = Warning InterfaceError = InterfaceError DatabaseError = DatabaseError InternalError = InternalError OperationalError = OperationalError ProgrammingError = ProgrammingError IntegrityError = IntegrityError DataError = DataError NotSupportedError = NotSupportedError def __init__(self, cnx): """Create a database connection object.""" self._cnx = cnx # connection self._tnx = False # transaction state self.type_cache = TypeCache(cnx) self.cursor_type = Cursor self.autocommit = False try: self._cnx.source() except Exception: raise _op_error("Invalid connection") def __enter__(self): """Enter the runtime context for the connection object. The runtime context can be used for running transactions. This also starts a transaction in autocommit mode. """ if self.autocommit: try: self._cnx.source().execute("BEGIN") except DatabaseError: raise # database provides error message except Exception: raise _op_error("Can't start transaction") else: self._tnx = True return self def __exit__(self, et, ev, tb): """Exit the runtime context for the connection object. This does not close the connection, but it ends a transaction. 
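
        For example (illustrative):

            with con:
                cur.execute("insert into t values (%s)", (1,))
            # committed on success, rolled back if an exception was raised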
""" if et is None and ev is None and tb is None: self.commit() else: self.rollback() def close(self): """Close the connection object.""" if self._cnx: if self._tnx: try: self.rollback() except DatabaseError: pass self._cnx.close() self._cnx = None else: raise _op_error("Connection has been closed") @property def closed(self): """Check whether the connection has been closed or is broken.""" try: return not self._cnx or self._cnx.status != 1 except TypeError: return True def commit(self): """Commit any pending transaction to the database.""" if self._cnx: if self._tnx: self._tnx = False try: self._cnx.source().execute("COMMIT") except DatabaseError: raise # database provides error message except Exception: raise _op_error("Can't commit transaction") else: raise _op_error("Connection has been closed") def rollback(self): """Roll back to the start of any pending transaction.""" if self._cnx: if self._tnx: self._tnx = False try: self._cnx.source().execute("ROLLBACK") except DatabaseError: raise # database provides error message except Exception: raise _op_error("Can't rollback transaction") else: raise _op_error("Connection has been closed") def cursor(self): """Return a new cursor object using the connection.""" if self._cnx: try: return self.cursor_type(self) except Exception: raise _op_error("Invalid connection") else: raise _op_error("Connection has been closed") if shortcutmethods: # otherwise do not implement and document this def execute(self, operation, params=None): """Shortcut method to run an operation on an implicit cursor.""" cursor = self.cursor() cursor.execute(operation, params) return cursor def executemany(self, operation, param_seq): """Shortcut method to run an operation against a sequence.""" cursor = self.cursor() cursor.executemany(operation, param_seq) return cursor ### Module Interface _connect = connect def connect(dsn=None, user=None, password=None, host=None, database=None, **kwargs): """Connect to a database.""" # first get params from DSN dbport = -1 dbhost = "" dbname = "" dbuser = "" dbpasswd = "" dbopt = "" try: params = dsn.split(":") dbhost = params[0] dbname = params[1] dbuser = params[2] dbpasswd = params[3] dbopt = params[4] except (AttributeError, IndexError, TypeError): pass # override if necessary if user is not None: dbuser = user if password is not None: dbpasswd = password if database is not None: dbname = database if host is not None: try: params = host.split(":") dbhost = params[0] dbport = int(params[1]) except (AttributeError, IndexError, TypeError, ValueError): pass # empty host is localhost if dbhost == "": dbhost = None if dbuser == "": dbuser = None # pass keyword arguments as connection info string if kwargs: kwargs = list(kwargs.items()) if '=' in dbname: dbname = [dbname] else: kwargs.insert(0, ('dbname', dbname)) dbname = [] for kw, value in kwargs: value = str(value) if not value or ' ' in value: value = "'%s'" % (value.replace( "'", "\\'").replace('\\', '\\\\'),) dbname.append('%s=%s' % (kw, value)) dbname = ' '.join(dbname) # open the connection cnx = _connect(dbname, dbhost, dbport, dbopt, dbuser, dbpasswd) return Connection(cnx) ### Types Handling class Type(frozenset): """Type class for a couple of PostgreSQL data types. PostgreSQL is object-oriented: types are dynamic. We must thus use type names as internal type codes. 
""" def __new__(cls, values): if isinstance(values, basestring): values = values.split() return super(Type, cls).__new__(cls, values) def __eq__(self, other): if isinstance(other, basestring): if other.startswith('_'): other = other[1:] return other in self else: return super(Type, self).__eq__(other) def __ne__(self, other): if isinstance(other, basestring): if other.startswith('_'): other = other[1:] return other not in self else: return super(Type, self).__ne__(other) class ArrayType: """Type class for PostgreSQL array types.""" def __eq__(self, other): if isinstance(other, basestring): return other.startswith('_') else: return isinstance(other, ArrayType) def __ne__(self, other): if isinstance(other, basestring): return not other.startswith('_') else: return not isinstance(other, ArrayType) class RecordType: """Type class for PostgreSQL record types.""" def __eq__(self, other): if isinstance(other, TypeCode): return other.type == 'c' elif isinstance(other, basestring): return other == 'record' else: return isinstance(other, RecordType) def __ne__(self, other): if isinstance(other, TypeCode): return other.type != 'c' elif isinstance(other, basestring): return other != 'record' else: return not isinstance(other, RecordType) # Mandatory type objects defined by DB-API 2 specs: STRING = Type('char bpchar name text varchar') BINARY = Type('bytea') NUMBER = Type('int2 int4 serial int8 float4 float8 numeric money') DATETIME = Type('date time timetz timestamp timestamptz interval' ' abstime reltime') # these are very old ROWID = Type('oid') # Additional type objects (more specific): BOOL = Type('bool') SMALLINT = Type('int2') INTEGER = Type('int2 int4 int8 serial') LONG = Type('int8') FLOAT = Type('float4 float8') NUMERIC = Type('numeric') MONEY = Type('money') DATE = Type('date') TIME = Type('time timetz') TIMESTAMP = Type('timestamp timestamptz') INTERVAL = Type('interval') UUID = Type('uuid') HSTORE = Type('hstore') JSON = Type('json jsonb') # Type object for arrays (also equate to their base types): ARRAY = ArrayType() # Type object for records (encompassing all composite types): RECORD = RecordType() # Mandatory type helpers defined by DB-API 2 specs: def Date(year, month, day): """Construct an object holding a date value.""" return date(year, month, day) def Time(hour, minute=0, second=0, microsecond=0, tzinfo=None): """Construct an object holding a time value.""" return time(hour, minute, second, microsecond, tzinfo) def Timestamp(year, month, day, hour=0, minute=0, second=0, microsecond=0, tzinfo=None): """Construct an object holding a time stamp value.""" return datetime(year, month, day, hour, minute, second, microsecond, tzinfo) def DateFromTicks(ticks): """Construct an object holding a date value from the given ticks value.""" return Date(*localtime(ticks)[:3]) def TimeFromTicks(ticks): """Construct an object holding a time value from the given ticks value.""" return Time(*localtime(ticks)[3:6]) def TimestampFromTicks(ticks): """Construct an object holding a time stamp from the given ticks value.""" return Timestamp(*localtime(ticks)[:6]) class Binary(bytes): """Construct an object capable of holding a binary (long) string value.""" # Additional type helpers for PyGreSQL: def Interval(days, hours=0, minutes=0, seconds=0, microseconds=0): """Construct an object holding a time interval value.""" return timedelta(days, hours=hours, minutes=minutes, seconds=seconds, microseconds=microseconds) Uuid = Uuid # Construct an object holding a UUID value class Hstore(dict): """Wrapper class for 
marking hstore values.""" _re_quote = regex('^[Nn][Uu][Ll][Ll]$|[ ,=>]') _re_escape = regex(r'(["\\])') @classmethod def _quote(cls, s): if s is None: return 'NULL' if not s: return '""' quote = cls._re_quote.search(s) s = cls._re_escape.sub(r'\\\1', s) if quote: s = '"%s"' % (s,) return s def __str__(self): q = self._quote return ','.join('%s=>%s' % (q(k), q(v)) for k, v in self.items()) class Json: """Construct a wrapper for holding an object serializable to JSON.""" def __init__(self, obj, encode=None): self.obj = obj self.encode = encode or jsonencode def __str__(self): obj = self.obj if isinstance(obj, basestring): return obj return self.encode(obj) class Literal: """Construct a wrapper for holding a literal SQL string.""" def __init__(self, sql): self.sql = sql def __str__(self): return self.sql __pg_repr__ = __str__ # If run as script, print some information: if __name__ == '__main__': print('PyGreSQL version', version) print('') print(__doc__) pygresql-5.1.2/pginternal.c000066400000000000000000001254261365010227600157100ustar00rootroot00000000000000/* * PyGreSQL - a Python interface for the PostgreSQL database. * * Internal functions - this file is part a of the C extension module. * * Copyright (c) 2020 by the PyGreSQL Development Team * * Please see the LICENSE.TXT file for specific restrictions. */ /* PyGreSQL internal types */ /* Simple types */ #define PYGRES_INT 1 #define PYGRES_LONG 2 #define PYGRES_FLOAT 3 #define PYGRES_DECIMAL 4 #define PYGRES_MONEY 5 #define PYGRES_BOOL 6 /* Text based types */ #define PYGRES_TEXT 8 #define PYGRES_BYTEA 9 #define PYGRES_JSON 10 #define PYGRES_OTHER 11 /* Array types */ #define PYGRES_ARRAY 16 /* Shared functions for encoding and decoding strings */ static PyObject * get_decoded_string(const char *str, Py_ssize_t size, int encoding) { if (encoding == pg_encoding_utf8) return PyUnicode_DecodeUTF8(str, size, "strict"); if (encoding == pg_encoding_latin1) return PyUnicode_DecodeLatin1(str, size, "strict"); if (encoding == pg_encoding_ascii) return PyUnicode_DecodeASCII(str, size, "strict"); /* encoding name should be properly translated to Python here */ return PyUnicode_Decode(str, size, pg_encoding_to_char(encoding), "strict"); } static PyObject * get_encoded_string(PyObject *unicode_obj, int encoding) { if (encoding == pg_encoding_utf8) return PyUnicode_AsUTF8String(unicode_obj); if (encoding == pg_encoding_latin1) return PyUnicode_AsLatin1String(unicode_obj); if (encoding == pg_encoding_ascii) return PyUnicode_AsASCIIString(unicode_obj); /* encoding name should be properly translated to Python here */ return PyUnicode_AsEncodedString(unicode_obj, pg_encoding_to_char(encoding), "strict"); } /* Helper functions */ /* Get PyGreSQL internal types for a PostgreSQL type. */ static int get_type(Oid pgtype) { int t; switch (pgtype) { /* simple types */ case INT2OID: case INT4OID: case CIDOID: case OIDOID: case XIDOID: t = PYGRES_INT; break; case INT8OID: t = PYGRES_LONG; break; case FLOAT4OID: case FLOAT8OID: t = PYGRES_FLOAT; break; case NUMERICOID: t = PYGRES_DECIMAL; break; case CASHOID: t = decimal_point ? PYGRES_MONEY : PYGRES_TEXT; break; case BOOLOID: t = PYGRES_BOOL; break; case BYTEAOID: t = bytea_escaped ? PYGRES_TEXT : PYGRES_BYTEA; break; case JSONOID: case JSONBOID: t = jsondecode ? 
PYGRES_JSON : PYGRES_TEXT; break; case BPCHAROID: case CHAROID: case TEXTOID: case VARCHAROID: case NAMEOID: case REGTYPEOID: t = PYGRES_TEXT; break; /* array types */ case INT2ARRAYOID: case INT4ARRAYOID: case CIDARRAYOID: case OIDARRAYOID: case XIDARRAYOID: t = array_as_text ? PYGRES_TEXT : (PYGRES_INT | PYGRES_ARRAY); break; case INT8ARRAYOID: t = array_as_text ? PYGRES_TEXT : (PYGRES_LONG | PYGRES_ARRAY); break; case FLOAT4ARRAYOID: case FLOAT8ARRAYOID: t = array_as_text ? PYGRES_TEXT : (PYGRES_FLOAT | PYGRES_ARRAY); break; case NUMERICARRAYOID: t = array_as_text ? PYGRES_TEXT : (PYGRES_DECIMAL | PYGRES_ARRAY); break; case MONEYARRAYOID: t = array_as_text ? PYGRES_TEXT : ((decimal_point ? PYGRES_MONEY : PYGRES_TEXT) | PYGRES_ARRAY); break; case BOOLARRAYOID: t = array_as_text ? PYGRES_TEXT : (PYGRES_BOOL | PYGRES_ARRAY); break; case BYTEAARRAYOID: t = array_as_text ? PYGRES_TEXT : ((bytea_escaped ? PYGRES_TEXT : PYGRES_BYTEA) | PYGRES_ARRAY); break; case JSONARRAYOID: case JSONBARRAYOID: t = array_as_text ? PYGRES_TEXT : ((jsondecode ? PYGRES_JSON : PYGRES_TEXT) | PYGRES_ARRAY); break; case BPCHARARRAYOID: case CHARARRAYOID: case TEXTARRAYOID: case VARCHARARRAYOID: case NAMEARRAYOID: case REGTYPEARRAYOID: t = array_as_text ? PYGRES_TEXT : (PYGRES_TEXT | PYGRES_ARRAY); break; default: t = PYGRES_OTHER; } return t; } /* Get PyGreSQL column types for all result columns. */ static int * get_col_types(PGresult *result, int nfields) { int *types, *t, j; if (!(types = PyMem_Malloc(sizeof(int) * (size_t) nfields))) { return (int*) PyErr_NoMemory(); } for (j = 0, t = types; j < nfields; ++j) { *t++ = get_type(PQftype(result, j)); } return types; } /* Cast a bytea encoded text based type to a Python object. This assumes the text is null-terminated character string. */ static PyObject * cast_bytea_text(char *s) { PyObject *obj; char *tmp_str; size_t str_len; /* this function should not be called when bytea_escaped is set */ tmp_str = (char *) PQunescapeBytea((unsigned char*) s, &str_len); obj = PyBytes_FromStringAndSize(tmp_str, (Py_ssize_t) str_len); if (tmp_str) { PQfreemem(tmp_str); } return obj; } /* Cast a text based type to a Python object. This needs the character string, size and encoding. */ static PyObject * cast_sized_text(char *s, Py_ssize_t size, int encoding, int type) { PyObject *obj, *tmp_obj; char *tmp_str; size_t str_len; switch (type) { /* this must be the PyGreSQL internal type */ case PYGRES_BYTEA: /* this type should not be passed when bytea_escaped is set */ /* we need to add a null byte */ tmp_str = (char *) PyMem_Malloc((size_t) size + 1); if (!tmp_str) { return PyErr_NoMemory(); } memcpy(tmp_str, s, (size_t) size); s = tmp_str; *(s + size) = '\0'; tmp_str = (char *) PQunescapeBytea((unsigned char*) s, &str_len); PyMem_Free(s); if (!tmp_str) return PyErr_NoMemory(); obj = PyBytes_FromStringAndSize(tmp_str, (Py_ssize_t) str_len); if (tmp_str) { PQfreemem(tmp_str); } break; case PYGRES_JSON: /* this type should only be passed when jsondecode is set */ obj = get_decoded_string(s, size, encoding); if (obj && jsondecode) { /* was able to decode */ tmp_obj = Py_BuildValue("(O)", obj); obj = PyObject_CallObject(jsondecode, tmp_obj); Py_DECREF(tmp_obj); } break; default: /* PYGRES_TEXT */ #if IS_PY3 obj = get_decoded_string(s, size, encoding); if (!obj) /* cannot decode */ #endif obj = PyBytes_FromStringAndSize(s, size); } return obj; } /* Cast an arbitrary type to a Python object using a callback function. 
This needs the character string, size, encoding, the Postgres type and the external typecast function to be called. */ static PyObject * cast_other(char *s, Py_ssize_t size, int encoding, Oid pgtype, PyObject *cast_hook) { PyObject *obj; obj = cast_sized_text(s, size, encoding, PYGRES_TEXT); if (cast_hook) { PyObject *tmp_obj = obj; obj = PyObject_CallFunction(cast_hook, "(OI)", obj, pgtype); Py_DECREF(tmp_obj); } return obj; } /* Cast a simple type to a Python object. This needs a character string representation with a given size. */ static PyObject * cast_sized_simple(char *s, Py_ssize_t size, int type) { PyObject *obj, *tmp_obj; char buf[64], *t; int i, j, n; switch (type) { /* this must be the PyGreSQL internal type */ case PYGRES_INT: n = sizeof(buf) / sizeof(buf[0]) - 1; if ((int) size < n) { n = (int) size; } for (i = 0, t = buf; i < n; ++i) { *t++ = *s++; } *t = '\0'; obj = PyInt_FromString(buf, NULL, 10); break; case PYGRES_LONG: n = sizeof(buf) / sizeof(buf[0]) - 1; if ((int) size < n) { n = (int) size; } for (i = 0, t = buf; i < n; ++i) { *t++ = *s++; } *t = '\0'; obj = PyLong_FromString(buf, NULL, 10); break; case PYGRES_FLOAT: tmp_obj = PyStr_FromStringAndSize(s, size); obj = PyFloat_FromString(tmp_obj); Py_DECREF(tmp_obj); break; case PYGRES_MONEY: /* this type should only be passed when decimal_point is set */ n = sizeof(buf) / sizeof(buf[0]) - 1; for (i = 0, j = 0; i < size && j < n; ++i, ++s) { if (*s >= '0' && *s <= '9') { buf[j++] = *s; } else if (*s == decimal_point) { buf[j++] = '.'; } else if (*s == '(' || *s == '-') { buf[j++] = '-'; } } if (decimal) { buf[j] = '\0'; obj = PyObject_CallFunction(decimal, "(s)", buf); } else { tmp_obj = PyStr_FromString(buf); obj = PyFloat_FromString(tmp_obj); Py_DECREF(tmp_obj); } break; case PYGRES_DECIMAL: tmp_obj = PyStr_FromStringAndSize(s, size); obj = decimal ? PyObject_CallFunctionObjArgs( decimal, tmp_obj, NULL) : PyFloat_FromString(tmp_obj); Py_DECREF(tmp_obj); break; case PYGRES_BOOL: /* convert to bool only if bool_as_text is not set */ if (bool_as_text) { obj = PyStr_FromString(*s == 't' ? "t" : "f"); } else { obj = *s == 't' ? Py_True : Py_False; Py_INCREF(obj); } break; default: /* other types should never be passed, use cast_sized_text */ obj = PyStr_FromStringAndSize(s, size); } return obj; } /* Cast a simple type to a Python object. This needs a null-terminated character string representation. */ static PyObject * cast_unsized_simple(char *s, int type) { PyObject *obj, *tmp_obj; char buf[64]; int j, n; switch (type) { /* this must be the PyGreSQL internal type */ case PYGRES_INT: obj = PyInt_FromString(s, NULL, 10); break; case PYGRES_LONG: obj = PyLong_FromString(s, NULL, 10); break; case PYGRES_FLOAT: tmp_obj = PyStr_FromString(s); obj = PyFloat_FromString(tmp_obj); Py_DECREF(tmp_obj); break; case PYGRES_MONEY: /* this type should only be passed when decimal_point is set */ n = sizeof(buf) / sizeof(buf[0]) - 1; for (j = 0; *s && j < n; ++s) { if (*s >= '0' && *s <= '9') { buf[j++] = *s; } else if (*s == decimal_point) { buf[j++] = '.'; } else if (*s == '(' || *s == '-') { buf[j++] = '-'; } } buf[j] = '\0'; s = buf; /* FALLTHROUGH */ /* no break here */ case PYGRES_DECIMAL: if (decimal) { obj = PyObject_CallFunction(decimal, "(s)", s); } else { tmp_obj = PyStr_FromString(s); obj = PyFloat_FromString(tmp_obj); Py_DECREF(tmp_obj); } break; case PYGRES_BOOL: /* convert to bool only if bool_as_text is not set */ if (bool_as_text) { obj = PyStr_FromString(*s == 't' ? "t" : "f"); } else { obj = *s == 't' ? 
Py_True : Py_False; Py_INCREF(obj); } break; default: /* other types should never be passed, use cast_sized_text */ obj = PyStr_FromString(s); } return obj; } /* Quick case insensitive check if given sized string is null. */ #define STR_IS_NULL(s, n) (n == 4 && \ (s[0] == 'n' || s[0] == 'N') && \ (s[1] == 'u' || s[1] == 'U') && \ (s[2] == 'l' || s[2] == 'L') && \ (s[3] == 'l' || s[3] == 'L')) /* Cast string s with size and encoding to a Python list, using the input and output syntax for arrays. Use internal type or cast function to cast elements. The parameter delim specifies the delimiter for the elements, since some types do not use the default delimiter of a comma. */ static PyObject * cast_array(char *s, Py_ssize_t size, int encoding, int type, PyObject *cast, char delim) { PyObject *result, *stack[MAX_ARRAY_DEPTH]; char *end = s + size, *t; int depth, ranges = 0, level = 0; if (type) { type &= ~PYGRES_ARRAY; /* get the base type */ if (!type) type = PYGRES_TEXT; } if (!delim) { delim = ','; } else if (delim == '{' || delim =='}' || delim=='\\') { PyErr_SetString(PyExc_ValueError, "Invalid array delimiter"); return NULL; } /* strip blanks at the beginning */ while (s != end && *s == ' ') ++s; if (*s == '[') { /* dimension ranges */ int valid; for (valid = 0; !valid;) { if (s == end || *s++ != '[') break; while (s != end && *s == ' ') ++s; if (s != end && (*s == '+' || *s == '-')) ++s; if (s == end || *s < '0' || *s > '9') break; while (s != end && *s >= '0' && *s <= '9') ++s; if (s == end || *s++ != ':') break; if (s != end && (*s == '+' || *s == '-')) ++s; if (s == end || *s < '0' || *s > '9') break; while (s != end && *s >= '0' && *s <= '9') ++s; if (s == end || *s++ != ']') break; while (s != end && *s == ' ') ++s; ++ranges; if (s != end && *s == '=') { do ++s; while (s != end && *s == ' '); valid = 1; } } if (!valid) { PyErr_SetString(PyExc_ValueError, "Invalid array dimensions"); return NULL; } } for (t = s, depth = 0; t != end && (*t == '{' || *t == ' '); ++t) { if (*t == '{') ++depth; } if (!depth) { PyErr_SetString(PyExc_ValueError, "Array must start with a left brace"); return NULL; } if (ranges && depth != ranges) { PyErr_SetString(PyExc_ValueError, "Array dimensions do not match content"); return NULL; } if (depth > MAX_ARRAY_DEPTH) { PyErr_SetString(PyExc_ValueError, "Array is too deeply nested"); return NULL; } depth--; /* next level of parsing */ result = PyList_New(0); if (!result) return NULL; do ++s; while (s != end && *s == ' '); /* everything is set up, start parsing the array */ while (s != end) { if (*s == '}') { PyObject *subresult; if (!level) break; /* top level array ended */ do ++s; while (s != end && *s == ' '); if (s == end) break; /* error */ if (*s == delim) { do ++s; while (s != end && *s == ' '); if (s == end) break; /* error */ if (*s != '{') { PyErr_SetString(PyExc_ValueError, "Subarray expected but not found"); Py_DECREF(result); return NULL; } } else if (*s != '}') break; /* error */ subresult = result; result = stack[--level]; if (PyList_Append(result, subresult)) { Py_DECREF(result); return NULL; } } else if (level == depth) { /* we expect elements at this level */ PyObject *element; char *estr; Py_ssize_t esize; int escaped = 0; if (*s == '{') { PyErr_SetString(PyExc_ValueError, "Subarray found where not expected"); Py_DECREF(result); return NULL; } if (*s == '"') { /* quoted element */ estr = ++s; while (s != end && *s != '"') { if (*s == '\\') { ++s; if (s == end) break; escaped = 1; } ++s; } esize = s - estr; do ++s; while (s != end && *s == ' 
'); } else { /* unquoted element */ estr = s; /* can contain blanks inside */ while (s != end && *s != '"' && *s != '{' && *s != '}' && *s != delim) { if (*s == '\\') { ++s; if (s == end) break; escaped = 1; } ++s; } t = s; while (t > estr && *(t - 1) == ' ') --t; if (!(esize = t - estr)) { s = end; break; /* error */ } if (STR_IS_NULL(estr, esize)) /* NULL gives None */ estr = NULL; } if (s == end) break; /* error */ if (estr) { if (escaped) { char *r; Py_ssize_t i; /* create unescaped string */ t = estr; estr = (char *) PyMem_Malloc((size_t) esize); if (!estr) { Py_DECREF(result); return PyErr_NoMemory(); } for (i = 0, r = estr; i < esize; ++i) { if (*t == '\\') ++t, ++i; *r++ = *t++; } esize = r - estr; } if (type) { /* internal casting of base type */ if (type & PYGRES_TEXT) element = cast_sized_text(estr, esize, encoding, type); else element = cast_sized_simple(estr, esize, type); } else { /* external casting of base type */ #if IS_PY3 element = encoding == pg_encoding_ascii ? NULL : get_decoded_string(estr, esize, encoding); if (!element) /* no decoding necessary or possible */ #endif element = PyBytes_FromStringAndSize(estr, esize); if (element && cast) { PyObject *tmp = element; element = PyObject_CallFunctionObjArgs( cast, element, NULL); Py_DECREF(tmp); } } if (escaped) PyMem_Free(estr); if (!element) { Py_DECREF(result); return NULL; } } else { Py_INCREF(Py_None); element = Py_None; } if (PyList_Append(result, element)) { Py_DECREF(element); Py_DECREF(result); return NULL; } Py_DECREF(element); if (*s == delim) { do ++s; while (s != end && *s == ' '); if (s == end) break; /* error */ } else if (*s != '}') break; /* error */ } else { /* we expect arrays at this level */ if (*s != '{') { PyErr_SetString(PyExc_ValueError, "Subarray must start with a left brace"); Py_DECREF(result); return NULL; } do ++s; while (s != end && *s == ' '); if (s == end) break; /* error */ stack[level++] = result; if (!(result = PyList_New(0))) return NULL; } } if (s == end || *s != '}') { PyErr_SetString(PyExc_ValueError, "Unexpected end of array"); Py_DECREF(result); return NULL; } do ++s; while (s != end && *s == ' '); if (s != end) { PyErr_SetString(PyExc_ValueError, "Unexpected characters after end of array"); Py_DECREF(result); return NULL; } return result; } /* Cast string s with size and encoding to a Python tuple. using the input and output syntax for composite types. Use array of internal types or cast function or sequence of cast functions to cast elements. The parameter len is the record size. The parameter delim can specify a delimiter for the elements, although composite types always use a comma as delimiter. 
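   As an informal example of the accepted syntax, the composite text
   value (42,"hello, world",) is parsed into the Python tuple
   (42, 'hello, world', None), assuming the first two fields are cast
   with the int and text types; the empty trailing field yields None.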
*/ static PyObject * cast_record(char *s, Py_ssize_t size, int encoding, int *type, PyObject *cast, Py_ssize_t len, char delim) { PyObject *result, *ret; char *end = s + size, *t; Py_ssize_t i; if (!delim) { delim = ','; } else if (delim == '(' || delim ==')' || delim=='\\') { PyErr_SetString(PyExc_ValueError, "Invalid record delimiter"); return NULL; } /* strip blanks at the beginning */ while (s != end && *s == ' ') ++s; if (s == end || *s != '(') { PyErr_SetString(PyExc_ValueError, "Record must start with a left parenthesis"); return NULL; } result = PyList_New(0); if (!result) return NULL; i = 0; /* everything is set up, start parsing the record */ while (++s != end) { PyObject *element; if (*s == ')' || *s == delim) { Py_INCREF(Py_None); element = Py_None; } else { char *estr; Py_ssize_t esize; int quoted = 0, escaped = 0; estr = s; quoted = *s == '"'; if (quoted) ++s; esize = 0; while (s != end) { if (!quoted && (*s == ')' || *s == delim)) break; if (*s == '"') { ++s; if (s == end) break; if (!(quoted && *s == '"')) { quoted = !quoted; continue; } } if (*s == '\\') { ++s; if (s == end) break; } ++s, ++esize; } if (s == end) break; /* error */ if (estr + esize != s) { char *r; escaped = 1; /* create unescaped string */ t = estr; estr = (char *) PyMem_Malloc((size_t) esize); if (!estr) { Py_DECREF(result); return PyErr_NoMemory(); } quoted = 0; r = estr; while (t != s) { if (*t == '"') { ++t; if (!(quoted && *t == '"')) { quoted = !quoted; continue; } } if (*t == '\\') ++t; *r++ = *t++; } } if (type) { /* internal casting of element type */ int etype = type[i]; if (etype & PYGRES_ARRAY) element = cast_array( estr, esize, encoding, etype, NULL, 0); else if (etype & PYGRES_TEXT) element = cast_sized_text(estr, esize, encoding, etype); else element = cast_sized_simple(estr, esize, etype); } else { /* external casting of base type */ #if IS_PY3 element = encoding == pg_encoding_ascii ? NULL : get_decoded_string(estr, esize, encoding); if (!element) /* no decoding necessary or possible */ #endif element = PyBytes_FromStringAndSize(estr, esize); if (element && cast) { if (len) { PyObject *ecast = PySequence_GetItem(cast, i); if (ecast) { if (ecast != Py_None) { PyObject *tmp = element; element = PyObject_CallFunctionObjArgs( ecast, element, NULL); Py_DECREF(tmp); } } else { Py_DECREF(element); element = NULL; } } else { PyObject *tmp = element; element = PyObject_CallFunctionObjArgs( cast, element, NULL); Py_DECREF(tmp); } } } if (escaped) PyMem_Free(estr); if (!element) { Py_DECREF(result); return NULL; } } if (PyList_Append(result, element)) { Py_DECREF(element); Py_DECREF(result); return NULL; } Py_DECREF(element); if (len) ++i; if (*s != delim) break; /* no next record */ if (len && i >= len) { PyErr_SetString(PyExc_ValueError, "Too many columns"); Py_DECREF(result); return NULL; } } if (s == end || *s != ')') { PyErr_SetString(PyExc_ValueError, "Unexpected end of record"); Py_DECREF(result); return NULL; } do ++s; while (s != end && *s == ' '); if (s != end) { PyErr_SetString(PyExc_ValueError, "Unexpected characters after end of record"); Py_DECREF(result); return NULL; } if (len && i < len) { PyErr_SetString(PyExc_ValueError, "Too few columns"); Py_DECREF(result); return NULL; } ret = PyList_AsTuple(result); Py_DECREF(result); return ret; } /* Cast string s with size and encoding to a Python dictionary. using the input and output syntax for hstore values. 
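   As an informal example, the hstore text value a=>1, "b c"=>NULL is
   parsed into the Python dictionary {'a': '1', 'b c': None}; an
   unquoted NULL becomes None, and keys and values stay strings since
   hstore itself is untyped.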
*/ static PyObject * cast_hstore(char *s, Py_ssize_t size, int encoding) { PyObject *result; char *end = s + size; result = PyDict_New(); /* everything is set up, start parsing the record */ while (s != end) { char *key, *val; PyObject *key_obj, *val_obj; Py_ssize_t key_esc = 0, val_esc = 0, size; int quoted; while (s != end && *s == ' ') ++s; if (s == end) break; quoted = *s == '"'; if (quoted) { key = ++s; while (s != end) { if (*s == '"') break; if (*s == '\\') { if (++s == end) break; ++key_esc; } ++s; } if (s == end) { PyErr_SetString(PyExc_ValueError, "Unterminated quote"); Py_DECREF(result); return NULL; } } else { key = s; while (s != end) { if (*s == '=' || *s == ' ') break; if (*s == '\\') { if (++s == end) break; ++key_esc; } ++s; } if (s == key) { PyErr_SetString(PyExc_ValueError, "Missing key"); Py_DECREF(result); return NULL; } } size = s - key - key_esc; if (key_esc) { char *r = key, *t; key = (char *) PyMem_Malloc((size_t) size); if (!key) { Py_DECREF(result); return PyErr_NoMemory(); } t = key; while (r != s) { if (*r == '\\') { ++r; if (r == s) break; } *t++ = *r++; } } key_obj = cast_sized_text(key, size, encoding, PYGRES_TEXT); if (key_esc) PyMem_Free(key); if (!key_obj) { Py_DECREF(result); return NULL; } if (quoted) ++s; while (s != end && *s == ' ') ++s; if (s == end || *s++ != '=' || s == end || *s++ != '>') { PyErr_SetString(PyExc_ValueError, "Invalid characters after key"); Py_DECREF(key_obj); Py_DECREF(result); return NULL; } while (s != end && *s == ' ') ++s; quoted = *s == '"'; if (quoted) { val = ++s; while (s != end) { if (*s == '"') break; if (*s == '\\') { if (++s == end) break; ++val_esc; } ++s; } if (s == end) { PyErr_SetString(PyExc_ValueError, "Unterminated quote"); Py_DECREF(result); return NULL; } } else { val = s; while (s != end) { if (*s == ',' || *s == ' ') break; if (*s == '\\') { if (++s == end) break; ++val_esc; } ++s; } if (s == val) { PyErr_SetString(PyExc_ValueError, "Missing value"); Py_DECREF(key_obj); Py_DECREF(result); return NULL; } if (STR_IS_NULL(val, s - val)) val = NULL; } if (val) { size = s - val - val_esc; if (val_esc) { char *r = val, *t; val = (char *) PyMem_Malloc((size_t) size); if (!val) { Py_DECREF(key_obj); Py_DECREF(result); return PyErr_NoMemory(); } t = val; while (r != s) { if (*r == '\\') { ++r; if (r == s) break; } *t++ = *r++; } } val_obj = cast_sized_text(val, size, encoding, PYGRES_TEXT); if (val_esc) PyMem_Free(val); if (!val_obj) { Py_DECREF(key_obj); Py_DECREF(result); return NULL; } } else { Py_INCREF(Py_None); val_obj = Py_None; } if (quoted) ++s; while (s != end && *s == ' ') ++s; if (s != end) { if (*s++ != ',') { PyErr_SetString(PyExc_ValueError, "Invalid characters after val"); Py_DECREF(key_obj); Py_DECREF(val_obj); Py_DECREF(result); return NULL; } while (s != end && *s == ' ') ++s; if (s == end) { PyErr_SetString(PyExc_ValueError, "Missing entry"); Py_DECREF(key_obj); Py_DECREF(val_obj); Py_DECREF(result); return NULL; } } PyDict_SetItem(result, key_obj, val_obj); Py_DECREF(key_obj); Py_DECREF(val_obj); } return result; } /* Get appropriate error type from sqlstate. 
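   For example, sqlstate 23505 (unique_violation) maps to
   IntegrityError and 42P01 (undefined_table) to ProgrammingError,
   so from the Python side one can write (a sketch with a placeholder
   table name):

       try:
           cur.execute("insert into mytable values (1)")
       except pgdb.IntegrityError as error:
           print(error.sqlstate)  # e.g. 23505 for a duplicate key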
*/ static PyObject * get_error_type(const char *sqlstate) { switch (sqlstate[0]) { case '0': switch (sqlstate[1]) { case 'A': return NotSupportedError; } break; case '2': switch (sqlstate[1]) { case '0': case '1': return ProgrammingError; case '2': return DataError; case '3': return IntegrityError; case '4': case '5': return InternalError; case '6': case '7': case '8': return OperationalError; case 'B': case 'D': case 'F': return InternalError; } break; case '3': switch (sqlstate[1]) { case '4': return OperationalError; case '8': case '9': case 'B': return InternalError; case 'D': case 'F': return ProgrammingError; } break; case '4': switch (sqlstate[1]) { case '0': return OperationalError; case '2': case '4': return ProgrammingError; } break; case '5': case 'H': return OperationalError; case 'F': case 'P': case 'X': return InternalError; } return DatabaseError; } /* Set database error message and sqlstate attribute. */ static void set_error_msg_and_state(PyObject *type, const char *msg, int encoding, const char *sqlstate) { PyObject *err_obj, *msg_obj, *sql_obj = NULL; #if IS_PY3 if (encoding == -1) /* unknown */ msg_obj = PyUnicode_DecodeLocale(msg, NULL); else msg_obj = get_decoded_string(msg, (Py_ssize_t) strlen(msg), encoding); if (!msg_obj) /* cannot decode */ #endif msg_obj = PyBytes_FromString(msg); if (sqlstate) { sql_obj = PyStr_FromStringAndSize(sqlstate, 5); } else { Py_INCREF(Py_None); sql_obj = Py_None; } err_obj = PyObject_CallFunctionObjArgs(type, msg_obj, NULL); if (err_obj) { Py_DECREF(msg_obj); PyObject_SetAttrString(err_obj, "sqlstate", sql_obj); Py_DECREF(sql_obj); PyErr_SetObject(type, err_obj); Py_DECREF(err_obj); } else { PyErr_SetString(type, msg); } } /* Set given database error message. */ static void set_error_msg(PyObject *type, const char *msg) { set_error_msg_and_state(type, msg, pg_encoding_ascii, NULL); } /* Set database error from connection and/or result. */ static void set_error(PyObject *type, const char * msg, PGconn *cnx, PGresult *result) { char *sqlstate = NULL; int encoding = pg_encoding_ascii; if (cnx) { char *err_msg = PQerrorMessage(cnx); if (err_msg) { msg = err_msg; encoding = PQclientEncoding(cnx); } } if (result) { sqlstate = PQresultErrorField(result, PG_DIAG_SQLSTATE); if (sqlstate) type = get_error_type(sqlstate); } set_error_msg_and_state(type, msg, encoding, sqlstate); } #ifdef SSL_INFO /* Get SSL attributes and values as a dictionary. */ static PyObject * get_ssl_attributes(PGconn *cnx) { PyObject *attr_dict = NULL; const char * const *s; if (!(attr_dict = PyDict_New())) { return NULL; } for (s = PQsslAttributeNames(cnx); *s; ++s) { const char *val = PQsslAttribute(cnx, *s); if (val) { PyObject * val_obj = PyStr_FromString(val); PyDict_SetItemString(attr_dict, *s, val_obj); Py_DECREF(val_obj); } else { PyDict_SetItemString(attr_dict, *s, Py_None); } } return attr_dict; } #endif /* SSL_INFO */ /* Format result (mostly useful for debugging). Note: This is similar to the Postgres function PQprint(). PQprint() is not used because handing over a stream from Python to PostgreSQL can be problematic if they use different libs for streams and because using PQprint() and tp_print is not recommended any more. 
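   As a rough illustration, str() on a query object ends up here and
   produces output shaped like:

       id|name
       --+----
        1|foo
        2|bar
       (2 rows)

   (the exact column widths depend on the data).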
*/ static PyObject * format_result(const PGresult *res) { const int n = PQnfields(res); if (n > 0) { char * const aligns = (char *) PyMem_Malloc( (unsigned int) n * sizeof(char)); size_t * const sizes = (size_t *) PyMem_Malloc( (unsigned int) n * sizeof(size_t)); if (aligns && sizes) { const int m = PQntuples(res); int i, j; size_t size; char *buffer; /* calculate sizes and alignments */ for (j = 0; j < n; ++j) { const char * const s = PQfname(res, j); const int format = PQfformat(res, j); sizes[j] = s ? strlen(s) : 0; if (format) { aligns[j] = '\0'; if (m && sizes[j] < 8) /* "<binary>" must fit */ sizes[j] = 8; } else { const Oid ftype = PQftype(res, j); switch (ftype) { case INT2OID: case INT4OID: case INT8OID: case FLOAT4OID: case FLOAT8OID: case NUMERICOID: case OIDOID: case XIDOID: case CIDOID: case CASHOID: aligns[j] = 'r'; break; default: aligns[j] = 'l'; } } } for (i = 0; i < m; ++i) { for (j = 0; j < n; ++j) { if (aligns[j]) { const int k = PQgetlength(res, i, j); if (sizes[j] < (size_t) k) /* value must fit */ sizes[j] = (size_t) k; } } } size = 0; /* size of one row */ for (j = 0; j < n; ++j) size += sizes[j] + 1; /* times number of rows incl. heading */ size *= (size_t) m + 2; /* plus size of footer */ size += 40; /* is the buffer size that needs to be allocated */ buffer = (char *) PyMem_Malloc(size); if (buffer) { char *p = buffer; PyObject *result; /* create the header */ for (j = 0; j < n; ++j) { const char * const s = PQfname(res, j); const size_t k = sizes[j]; const size_t h = (k - (size_t) strlen(s)) / 2; sprintf(p, "%*s", (int) h, ""); sprintf(p + h, "%-*s", (int) (k - h), s); p += k; if (j + 1 < n) *p++ = '|'; } *p++ = '\n'; for (j = 0; j < n; ++j) { size_t k = sizes[j]; while (k--) *p++ = '-'; if (j + 1 < n) *p++ = '+'; } *p++ = '\n'; /* create the body */ for (i = 0; i < m; ++i) { for (j = 0; j < n; ++j) { const char align = aligns[j]; const size_t k = sizes[j]; if (align) { sprintf(p, align == 'r' ? "%*s" : "%-*s", (int) k, PQgetvalue(res, i, j)); } else { sprintf(p, "%-*s", (int) k, PQgetisnull(res, i, j) ? "" : "<binary>"); } p += k; if (j + 1 < n) *p++ = '|'; } *p++ = '\n'; } /* free memory */ PyMem_Free(aligns); PyMem_Free(sizes); /* create the footer */ sprintf(p, "(%d row%s)", m, m == 1 ? "" : "s"); /* return the result */ result = PyStr_FromString(buffer); PyMem_Free(buffer); return result; } else { PyMem_Free(aligns); PyMem_Free(sizes); return PyErr_NoMemory(); } } else { PyMem_Free(aligns); PyMem_Free(sizes); return PyErr_NoMemory(); } } else return PyStr_FromString("(nothing selected)"); } /* Internal function converting Postgres datestyles to date formats. */ static const char * date_style_to_format(const char *s) { static const char *formats[] = { "%Y-%m-%d", /* 0 = ISO */ "%m-%d-%Y", /* 1 = Postgres, MDY */ "%d-%m-%Y", /* 2 = Postgres, DMY */ "%m/%d/%Y", /* 3 = SQL, MDY */ "%d/%m/%Y", /* 4 = SQL, DMY */ "%d.%m.%Y" /* 5 = German */ }; switch (s ? *s : 'I') { case 'P': /* Postgres */ s = strchr(s + 1, ','); if (s) do ++s; while (*s && *s == ' '); return formats[s && *s == 'D' ? 2 : 1]; case 'S': /* SQL */ s = strchr(s + 1, ','); if (s) do ++s; while (*s && *s == ' '); return formats[s && *s == 'D' ? 4 : 3]; case 'G': /* German */ return formats[5]; default: /* ISO */ return formats[0]; /* ISO is the default */ } } /* Internal function converting a date format to a Postgres datestyle.
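   For example, the format "%d/%m/%Y" maps back to "SQL, DMY" and
   "%Y-%m-%d" to "ISO, YMD", mirroring date_style_to_format() above.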
*/ static const char * date_format_to_style(const char *s) { static const char *datestyle[] = { "ISO, YMD", /* 0 = %Y-%m-%d */ "Postgres, MDY", /* 1 = %m-%d-%Y */ "Postgres, DMY", /* 2 = %d-%m-%Y */ "SQL, MDY", /* 3 = %m/%d/%Y */ "SQL, DMY", /* 4 = %d/%m/%Y */ "German, DMY" /* 5 = %d.%m.%Y */ }; switch (s ? s[1] : 'Y') { case 'm': switch (s[2]) { case '/': return datestyle[3]; /* SQL, MDY */ default: return datestyle[1]; /* Postgres, MDY */ } case 'd': switch (s[2]) { case '/': return datestyle[4]; /* SQL, DMY */ case '.': return datestyle[5]; /* German */ default: return datestyle[2]; /* Postgres, DMY */ } default: return datestyle[0]; /* ISO */ } } /* Internal wrapper for the notice receiver callback. */ static void notice_receiver(void *arg, const PGresult *res) { PyGILState_STATE gstate = PyGILState_Ensure(); connObject *self = (connObject*) arg; PyObject *func = self->notice_receiver; if (func) { noticeObject *notice = PyObject_New(noticeObject, ¬iceType); PyObject *ret; if (notice) { notice->pgcnx = arg; notice->res = res; } else { Py_INCREF(Py_None); notice = (noticeObject *)(void *) Py_None; } ret = PyObject_CallFunction(func, "(O)", notice); Py_XDECREF(ret); } PyGILState_Release(gstate); } pygresql-5.1.2/pglarge.c000066400000000000000000000313451365010227600151620ustar00rootroot00000000000000/* * PyGreSQL - a Python interface for the PostgreSQL database. * * Large object support - this file is part a of the C extension module. * * Copyright (c) 2020 by the PyGreSQL Development Team * * Please see the LICENSE.TXT file for specific restrictions. */ /* Deallocate large object. */ static void large_dealloc(largeObject *self) { if (self->lo_fd >= 0 && self->pgcnx->valid) lo_close(self->pgcnx->cnx, self->lo_fd); Py_XDECREF(self->pgcnx); PyObject_Del(self); } /* Return large object as string in human readable form. */ static PyObject * large_str(largeObject *self) { char str[80]; sprintf(str, self->lo_fd >= 0 ? "Opened large object, oid %ld" : "Closed large object, oid %ld", (long) self->lo_oid); return PyStr_FromString(str); } /* Check validity of large object. */ static int _check_lo_obj(largeObject *self, int level) { if (!_check_cnx_obj(self->pgcnx)) return 0; if (!self->lo_oid) { set_error_msg(IntegrityError, "Object is not valid (null oid)"); return 0; } if (level & CHECK_OPEN) { if (self->lo_fd < 0) { PyErr_SetString(PyExc_IOError, "Object is not opened"); return 0; } } if (level & CHECK_CLOSE) { if (self->lo_fd >= 0) { PyErr_SetString(PyExc_IOError, "Object is already opened"); return 0; } } return 1; } /* Get large object attributes. */ static PyObject * large_getattr(largeObject *self, PyObject *nameobj) { const char *name = PyStr_AsString(nameobj); /* list postgreSQL large object fields */ /* associated pg connection object */ if (!strcmp(name, "pgcnx")) { if (_check_lo_obj(self, 0)) { Py_INCREF(self->pgcnx); return (PyObject *) (self->pgcnx); } PyErr_Clear(); Py_INCREF(Py_None); return Py_None; } /* large object oid */ if (!strcmp(name, "oid")) { if (_check_lo_obj(self, 0)) return PyInt_FromLong(self->lo_oid); PyErr_Clear(); Py_INCREF(Py_None); return Py_None; } /* error (status) message */ if (!strcmp(name, "error")) return PyStr_FromString(PQerrorMessage(self->pgcnx->cnx)); /* seeks name in methods (fallback) */ return PyObject_GenericGetAttr((PyObject *) self, nameobj); } /* Get the list of large object attributes. 
*/ static PyObject * large_dir(largeObject *self, PyObject *noargs) { PyObject *attrs; attrs = PyObject_Dir(PyObject_Type((PyObject *) self)); PyObject_CallMethod( attrs, "extend", "[sss]", "oid", "pgcnx", "error"); return attrs; } /* Open large object. */ static char large_open__doc__[] = "open(mode) -- open access to large object with specified mode\n\n" "The mode must be one of INV_READ, INV_WRITE (module level constants).\n"; static PyObject * large_open(largeObject *self, PyObject *args) { int mode, fd; /* gets arguments */ if (!PyArg_ParseTuple(args, "i", &mode)) { PyErr_SetString(PyExc_TypeError, "The open() method takes an integer argument"); return NULL; } /* check validity */ if (!_check_lo_obj(self, CHECK_CLOSE)) { return NULL; } /* opens large object */ if ((fd = lo_open(self->pgcnx->cnx, self->lo_oid, mode)) == -1) { PyErr_SetString(PyExc_IOError, "Can't open large object"); return NULL; } self->lo_fd = fd; /* no error : returns Py_None */ Py_INCREF(Py_None); return Py_None; } /* Close large object. */ static char large_close__doc__[] = "close() -- close access to large object data"; static PyObject * large_close(largeObject *self, PyObject *noargs) { /* checks validity */ if (!_check_lo_obj(self, CHECK_OPEN)) { return NULL; } /* closes large object */ if (lo_close(self->pgcnx->cnx, self->lo_fd)) { PyErr_SetString(PyExc_IOError, "Error while closing large object fd"); return NULL; } self->lo_fd = -1; /* no error : returns Py_None */ Py_INCREF(Py_None); return Py_None; } /* Read from large object. */ static char large_read__doc__[] = "read(size) -- read from large object to sized string\n\n" "Object must be opened in read mode before calling this method.\n"; static PyObject * large_read(largeObject *self, PyObject *args) { int size; PyObject *buffer; /* gets arguments */ if (!PyArg_ParseTuple(args, "i", &size)) { PyErr_SetString(PyExc_TypeError, "Method read() takes an integer argument"); return NULL; } if (size <= 0) { PyErr_SetString(PyExc_ValueError, "Method read() takes a positive integer as argument"); return NULL; } /* checks validity */ if (!_check_lo_obj(self, CHECK_OPEN)) { return NULL; } /* allocates buffer and runs read */ buffer = PyBytes_FromStringAndSize((char *) NULL, size); if ((size = lo_read(self->pgcnx->cnx, self->lo_fd, PyBytes_AS_STRING((PyBytesObject *) (buffer)), (size_t) size)) == -1) { PyErr_SetString(PyExc_IOError, "Error while reading"); Py_XDECREF(buffer); return NULL; } /* resizes buffer and returns it */ _PyBytes_Resize(&buffer, size); return buffer; } /* Write to large object. */ static char large_write__doc__[] = "write(string) -- write sized string to large object\n\n" "Object must be opened in write mode before calling this method.\n"; static PyObject * large_write(largeObject *self, PyObject *args) { char *buffer; int size; Py_ssize_t bufsize; /* gets arguments */ if (!PyArg_ParseTuple(args, "s#", &buffer, &bufsize)) { PyErr_SetString(PyExc_TypeError, "Method write() expects a sized string as argument"); return NULL; } /* checks validity */ if (!_check_lo_obj(self, CHECK_OPEN)) { return NULL; } /* writes data */ if ((size = lo_write(self->pgcnx->cnx, self->lo_fd, buffer, (size_t) bufsize)) != bufsize) { PyErr_SetString(PyExc_IOError, "Buffer truncated during write"); return NULL; } /* no error : returns Py_None */ Py_INCREF(Py_None); return Py_None; } /* Go to position in large object. */ static char large_seek__doc__[] = "seek(offset, whence) -- move to specified position\n\n" "Object must be opened before calling this method. 
The whence option\n" "can be SEEK_SET, SEEK_CUR or SEEK_END (module level constants).\n"; static PyObject * large_seek(largeObject *self, PyObject *args) { /* offset and whence are initialized to keep compiler happy */ int ret, offset = 0, whence = 0; /* gets arguments */ if (!PyArg_ParseTuple(args, "ii", &offset, &whence)) { PyErr_SetString(PyExc_TypeError, "Method seek() expects two integer arguments"); return NULL; } /* checks validity */ if (!_check_lo_obj(self, CHECK_OPEN)) { return NULL; } /* moves to the specified position */ if ((ret = lo_lseek( self->pgcnx->cnx, self->lo_fd, offset, whence)) == -1) { PyErr_SetString(PyExc_IOError, "Error while moving cursor"); return NULL; } /* returns position */ return PyInt_FromLong(ret); } /* Get large object size. */ static char large_size__doc__[] = "size() -- return large object size\n\n" "The object must be opened before calling this method.\n"; static PyObject * large_size(largeObject *self, PyObject *noargs) { int start, end; /* checks validity */ if (!_check_lo_obj(self, CHECK_OPEN)) { return NULL; } /* gets current position */ if ((start = lo_tell(self->pgcnx->cnx, self->lo_fd)) == -1) { PyErr_SetString(PyExc_IOError, "Error while getting current position"); return NULL; } /* gets end position */ if ((end = lo_lseek(self->pgcnx->cnx, self->lo_fd, 0, SEEK_END)) == -1) { PyErr_SetString(PyExc_IOError, "Error while getting end position"); return NULL; } /* moves back to start position */ if ((start = lo_lseek( self->pgcnx->cnx, self->lo_fd, start, SEEK_SET)) == -1) { PyErr_SetString(PyExc_IOError, "Error while moving back to first position"); return NULL; } /* returns size */ return PyInt_FromLong(end); } /* Get large object cursor position. */ static char large_tell__doc__[] = "tell() -- give current position in large object\n\n" "The object must be opened before calling this method.\n"; static PyObject * large_tell(largeObject *self, PyObject *noargs) { int start; /* checks validity */ if (!_check_lo_obj(self, CHECK_OPEN)) { return NULL; } /* gets current position */ if ((start = lo_tell(self->pgcnx->cnx, self->lo_fd)) == -1) { PyErr_SetString(PyExc_IOError, "Error while getting position"); return NULL; } /* returns position */ return PyInt_FromLong(start); } /* Export large object as unix file. */ static char large_export__doc__[] = "export(filename) -- export large object data to specified file\n\n" "The object must be closed when calling this method.\n"; static PyObject * large_export(largeObject *self, PyObject *args) { char *name; /* checks validity */ if (!_check_lo_obj(self, CHECK_CLOSE)) { return NULL; } /* gets arguments */ if (!PyArg_ParseTuple(args, "s", &name)) { PyErr_SetString(PyExc_TypeError, "The method export() takes a filename as argument"); return NULL; } /* runs command */ if (lo_export(self->pgcnx->cnx, self->lo_oid, name) != 1) { PyErr_SetString(PyExc_IOError, "Error while exporting large object"); return NULL; } Py_INCREF(Py_None); return Py_None; } /* Delete a large object. 
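   A hedged usage sketch of the whole life cycle from Python, assuming
   an open pg connection cnx (locreate() and the INV_* constants are
   documented elsewhere in this module):

       lo = cnx.locreate(pg.INV_READ | pg.INV_WRITE)
       lo.open(pg.INV_WRITE)
       lo.write('some data')
       lo.close()
       lo.unlink()  # destroys the object; it must be closed first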
*/ static char large_unlink__doc__[] = "unlink() -- destroy large object\n\n" "The object must be closed when calling this method.\n"; static PyObject * large_unlink(largeObject *self, PyObject *noargs) { /* checks validity */ if (!_check_lo_obj(self, CHECK_CLOSE)) { return NULL; } /* deletes the object, invalidate it on success */ if (lo_unlink(self->pgcnx->cnx, self->lo_oid) != 1) { PyErr_SetString(PyExc_IOError, "Error while unlinking large object"); return NULL; } self->lo_oid = 0; Py_INCREF(Py_None); return Py_None; } /* Large object methods */ static struct PyMethodDef large_methods[] = { {"__dir__", (PyCFunction) large_dir, METH_NOARGS, NULL}, {"open", (PyCFunction) large_open, METH_VARARGS, large_open__doc__}, {"close", (PyCFunction) large_close, METH_NOARGS, large_close__doc__}, {"read", (PyCFunction) large_read, METH_VARARGS, large_read__doc__}, {"write", (PyCFunction) large_write, METH_VARARGS, large_write__doc__}, {"seek", (PyCFunction) large_seek, METH_VARARGS, large_seek__doc__}, {"size", (PyCFunction) large_size, METH_NOARGS, large_size__doc__}, {"tell", (PyCFunction) large_tell, METH_NOARGS, large_tell__doc__}, {"export",(PyCFunction) large_export, METH_VARARGS, large_export__doc__}, {"unlink",(PyCFunction) large_unlink, METH_NOARGS, large_unlink__doc__}, {NULL, NULL} }; static char large__doc__[] = "PostgreSQL large object"; /* Large object type definition */ static PyTypeObject largeType = { PyVarObject_HEAD_INIT(NULL, 0) "pg.LargeObject", /* tp_name */ sizeof(largeObject), /* tp_basicsize */ 0, /* tp_itemsize */ /* methods */ (destructor) large_dealloc, /* tp_dealloc */ 0, /* tp_print */ 0, /* tp_getattr */ 0, /* tp_setattr */ 0, /* tp_compare */ 0, /* tp_repr */ 0, /* tp_as_number */ 0, /* tp_as_sequence */ 0, /* tp_as_mapping */ 0, /* tp_hash */ 0, /* tp_call */ (reprfunc) large_str, /* tp_str */ (getattrofunc) large_getattr, /* tp_getattro */ 0, /* tp_setattro */ 0, /* tp_as_buffer */ Py_TPFLAGS_DEFAULT, /* tp_flags */ large__doc__, /* tp_doc */ 0, /* tp_traverse */ 0, /* tp_clear */ 0, /* tp_richcompare */ 0, /* tp_weaklistoffset */ 0, /* tp_iter */ 0, /* tp_iternext */ large_methods, /* tp_methods */ }; pygresql-5.1.2/pgmodule.c000066400000000000000000001152771365010227600153640ustar00rootroot00000000000000/* * PyGreSQL - a Python interface for the PostgreSQL database. * * This is the main file for the C extension module. * * Copyright (c) 2020 by the PyGreSQL Development Team * * Please see the LICENSE.TXT file for specific restrictions. 
*/ /* Note: This should be linked against the same C runtime lib as Python */ #define PY_SSIZE_T_CLEAN #include <Python.h> #include <libpq-fe.h> #include <libpq/libpq-fs.h> /* The type definitions from <server/catalog/pg_type.h> */ #include "pgtypes.h" /* Macros for single-source Python 2/3 compatibility */ #include "py3c.h" static PyObject *Error, *Warning, *InterfaceError, *DatabaseError, *InternalError, *OperationalError, *ProgrammingError, *IntegrityError, *DataError, *NotSupportedError, *InvalidResultError, *NoResultError, *MultipleResultsError; #define _TOSTRING(x) #x #define TOSTRING(x) _TOSTRING(x) static const char *PyPgVersion = TOSTRING(PYGRESQL_VERSION); #if SIZEOF_SIZE_T != SIZEOF_INT #define Py_InitModule4 Py_InitModule4_64 #endif /* Default values */ #define PG_ARRAYSIZE 1 /* Flags for object validity checks */ #define CHECK_OPEN 1 #define CHECK_CLOSE 2 #define CHECK_CNX 4 #define CHECK_RESULT 8 #define CHECK_DQL 16 /* Query result types */ #define RESULT_EMPTY 1 #define RESULT_DML 2 #define RESULT_DDL 3 #define RESULT_DQL 4 /* Flags for move methods */ #define QUERY_MOVEFIRST 1 #define QUERY_MOVELAST 2 #define QUERY_MOVENEXT 3 #define QUERY_MOVEPREV 4 #define MAX_BUFFER_SIZE 8192 /* maximum transaction size */ #define MAX_ARRAY_DEPTH 16 /* maximum allowed depth of an array */ /* MODULE GLOBAL VARIABLES */ #ifdef DEFAULT_VARS static PyObject *pg_default_host; /* default database host */ static PyObject *pg_default_base; /* default database name */ static PyObject *pg_default_opt; /* default connection options */ static PyObject *pg_default_port; /* default connection port */ static PyObject *pg_default_user; /* default username */ static PyObject *pg_default_passwd; /* default password */ #endif /* DEFAULT_VARS */ static PyObject *decimal = NULL, /* decimal type */ *dictiter = NULL, /* function for getting dict results */ *namediter = NULL, /* function for getting named results */ *namednext = NULL, /* function for getting one named result */ *scalariter = NULL, /* function for getting scalar results */ *jsondecode = NULL; /* function for decoding json strings */ static const char *date_format = NULL; /* date format that is always assumed */ static char decimal_point = '.'; /* decimal point used in money values */ static int bool_as_text = 0; /* whether bool shall be returned as text */ static int array_as_text = 0; /* whether arrays shall be returned as text */ static int bytea_escaped = 0; /* whether bytea shall be returned escaped */ static int pg_encoding_utf8 = 0; static int pg_encoding_latin1 = 0; static int pg_encoding_ascii = 0; /* OBJECTS ======= Each object has a number of elements. The naming scheme will be based on the object type. Here are the elements using example object type "foo". - fooType: Type definition for object. - fooObject: A structure to hold local object information. - foo_methods: Methods declaration. - foo_method_name: Object methods. The objects that we need to create: - pg: The module itself. - conn: Connection object returned from pg.connect(). - notice: Notice object returned from pg.notice(). - large: Large object returned by pg.conn.locreate() and pg.conn.loimport(). - query: Query object returned by pg.conn.query(). - source: Source object returned by pg.conn.source().
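   A rough sketch of how these objects are obtained from Python (the
   database name is a placeholder):

       import pg
       cnx = pg.connect('mydb')          # conn object
       q = cnx.query("select 1 as one")  # query object
       src = cnx.source()                # source object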
*/ /* Forward declarations for types */ static PyTypeObject connType, sourceType, queryType, noticeType, largeType; /* Forward static declarations */ static void notice_receiver(void *, const PGresult *); /* Object declarations */ typedef struct { PyObject_HEAD int valid; /* validity flag */ PGconn *cnx; /* Postgres connection handle */ const char *date_format; /* date format derived from datestyle */ PyObject *cast_hook; /* external typecast method */ PyObject *notice_receiver; /* current notice receiver */ } connObject; #define is_connObject(v) (PyType(v) == &connType) typedef struct { PyObject_HEAD int valid; /* validity flag */ connObject *pgcnx; /* parent connection object */ PGresult *result; /* result content */ int encoding; /* client encoding */ int result_type; /* result type (DDL/DML/DQL) */ long arraysize; /* array size for fetch method */ int current_row; /* currently selected row */ int max_row; /* number of rows in the result */ int num_fields; /* number of fields in each row */ } sourceObject; #define is_sourceObject(v) (PyType(v) == &sourceType) typedef struct { PyObject_HEAD connObject *pgcnx; /* parent connection object */ PGresult const *res; /* an error or warning */ } noticeObject; #define is_noticeObject(v) (PyType(v) == ¬iceType) typedef struct { PyObject_HEAD connObject *pgcnx; /* parent connection object */ PGresult *result; /* result content */ int encoding; /* client encoding */ int current_row; /* currently selected row */ int max_row; /* number of rows in the result */ int num_fields; /* number of fields in each row */ int *col_types; /* PyGreSQL column types */ } queryObject; #define is_queryObject(v) (PyType(v) == &queryType) #ifdef LARGE_OBJECTS typedef struct { PyObject_HEAD connObject *pgcnx; /* parent connection object */ Oid lo_oid; /* large object oid */ int lo_fd; /* large object fd */ } largeObject; #define is_largeObject(v) (PyType(v) == &largeType) #endif /* LARGE_OBJECTS */ /* Internal functions */ #include "pginternal.c" /* Connection object */ #include "pgconn.c" /* Query object */ #include "pgquery.c" /* Source object */ #include "pgsource.c" /* Notice object */ #include "pgnotice.c" /* Large objects */ #ifdef LARGE_OBJECTS #include "pglarge.c" #endif /* MODULE FUNCTIONS */ /* Connect to a database. */ static char pg_connect__doc__[] = "connect(dbname, host, port, opt) -- connect to a PostgreSQL database\n\n" "The connection uses the specified parameters (optional, keywords aware).\n"; static PyObject * pg_connect(PyObject *self, PyObject *args, PyObject *dict) { static const char *kwlist[] = { "dbname", "host", "port", "opt", "user", "passwd", NULL }; char *pghost, *pgopt, *pgdbname, *pguser, *pgpasswd; int pgport; char port_buffer[20]; connObject *conn_obj; pghost = pgopt = pgdbname = pguser = pgpasswd = NULL; pgport = -1; /* * parses standard arguments With the right compiler warnings, this * will issue a diagnostic. There is really no way around it. If I * don't declare kwlist as const char *kwlist[] then it complains when * I try to assign all those constant strings to it. 
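    As an informal example, the Python call that ends up here looks
    like this (all parameters are optional keywords, the values below
    are placeholders):

        pg.connect(dbname='mydb', host='localhost', port=5432,
                   user='postgres', passwd='secret')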
*/ if (!PyArg_ParseTupleAndKeywords( args, dict, "|zzizzz", (char**)kwlist, &pgdbname, &pghost, &pgport, &pgopt, &pguser, &pgpasswd)) { return NULL; } #ifdef DEFAULT_VARS /* handles defaults variables (for uninitialised vars) */ if ((!pghost) && (pg_default_host != Py_None)) pghost = PyBytes_AsString(pg_default_host); if ((pgport == -1) && (pg_default_port != Py_None)) pgport = (int) PyInt_AsLong(pg_default_port); if ((!pgopt) && (pg_default_opt != Py_None)) pgopt = PyBytes_AsString(pg_default_opt); if ((!pgdbname) && (pg_default_base != Py_None)) pgdbname = PyBytes_AsString(pg_default_base); if ((!pguser) && (pg_default_user != Py_None)) pguser = PyBytes_AsString(pg_default_user); if ((!pgpasswd) && (pg_default_passwd != Py_None)) pgpasswd = PyBytes_AsString(pg_default_passwd); #endif /* DEFAULT_VARS */ if (!(conn_obj = PyObject_New(connObject, &connType))) { set_error_msg(InternalError, "Can't create new connection object"); return NULL; } conn_obj->valid = 1; conn_obj->cnx = NULL; conn_obj->date_format = date_format; conn_obj->cast_hook = NULL; conn_obj->notice_receiver = NULL; if (pgport != -1) { memset(port_buffer, 0, sizeof(port_buffer)); sprintf(port_buffer, "%d", pgport); } Py_BEGIN_ALLOW_THREADS conn_obj->cnx = PQsetdbLogin(pghost, pgport == -1 ? NULL : port_buffer, pgopt, NULL, pgdbname, pguser, pgpasswd); Py_END_ALLOW_THREADS if (PQstatus(conn_obj->cnx) == CONNECTION_BAD) { set_error(InternalError, "Cannot connect", conn_obj->cnx, NULL); Py_XDECREF(conn_obj); return NULL; } return (PyObject *) conn_obj; } /* Escape string */ static char pg_escape_string__doc__[] = "escape_string(string) -- escape a string for use within SQL"; static PyObject * pg_escape_string(PyObject *self, PyObject *string) { PyObject *tmp_obj = NULL, /* auxiliary string object */ *to_obj; /* string object to return */ char *from, /* our string argument as encoded string */ *to; /* the result as encoded string */ Py_ssize_t from_length; /* length of string */ size_t to_length; /* length of result */ int encoding = -1; /* client encoding */ if (PyBytes_Check(string)) { PyBytes_AsStringAndSize(string, &from, &from_length); } else if (PyUnicode_Check(string)) { encoding = pg_encoding_ascii; tmp_obj = get_encoded_string(string, encoding); if (!tmp_obj) return NULL; /* pass the UnicodeEncodeError */ PyBytes_AsStringAndSize(tmp_obj, &from, &from_length); } else { PyErr_SetString(PyExc_TypeError, "Method escape_string() expects a string as argument"); return NULL; } to_length = 2 * (size_t) from_length + 1; if ((Py_ssize_t ) to_length < from_length) { /* overflow */ to_length = (size_t) from_length; from_length = (from_length - 1)/2; } to = (char *) PyMem_Malloc(to_length); to_length = (size_t) PQescapeString(to, from, (size_t) from_length); Py_XDECREF(tmp_obj); if (encoding == -1) to_obj = PyBytes_FromStringAndSize(to, (Py_ssize_t) to_length); else to_obj = get_decoded_string(to, (Py_ssize_t) to_length, encoding); PyMem_Free(to); return to_obj; } /* Escape bytea */ static char pg_escape_bytea__doc__[] = "escape_bytea(data) -- escape binary data for use within SQL as type bytea"; static PyObject * pg_escape_bytea(PyObject *self, PyObject *data) { PyObject *tmp_obj = NULL, /* auxiliary string object */ *to_obj; /* string object to return */ char *from, /* our string argument as encoded string */ *to; /* the result as encoded string */ Py_ssize_t from_length; /* length of string */ size_t to_length; /* length of result */ int encoding = -1; /* client encoding */ if (PyBytes_Check(data)) { PyBytes_AsStringAndSize(data, 
&from, &from_length); } else if (PyUnicode_Check(data)) { encoding = pg_encoding_ascii; tmp_obj = get_encoded_string(data, encoding); if (!tmp_obj) return NULL; /* pass the UnicodeEncodeError */ PyBytes_AsStringAndSize(tmp_obj, &from, &from_length); } else { PyErr_SetString(PyExc_TypeError, "Method escape_bytea() expects a string as argument"); return NULL; } to = (char *) PQescapeBytea( (unsigned char*) from, (size_t) from_length, &to_length); Py_XDECREF(tmp_obj); if (!to) return PyErr_NoMemory(); /* PQescapeBytea can run out of memory */ if (encoding == -1) to_obj = PyBytes_FromStringAndSize(to, (Py_ssize_t) to_length - 1); else to_obj = get_decoded_string(to, (Py_ssize_t) to_length - 1, encoding); PQfreemem(to); return to_obj; } /* Unescape bytea */ static char pg_unescape_bytea__doc__[] = "unescape_bytea(string) -- unescape bytea data retrieved as text"; static PyObject * pg_unescape_bytea(PyObject *self, PyObject *data) { PyObject *tmp_obj = NULL, /* auxiliary string object */ *to_obj; /* string object to return */ char *from, /* our string argument as encoded string */ *to; /* the result as encoded string */ Py_ssize_t from_length; /* length of string */ size_t to_length; /* length of result */ if (PyBytes_Check(data)) { PyBytes_AsStringAndSize(data, &from, &from_length); } else if (PyUnicode_Check(data)) { tmp_obj = get_encoded_string(data, pg_encoding_ascii); if (!tmp_obj) return NULL; /* pass the UnicodeEncodeError */ PyBytes_AsStringAndSize(tmp_obj, &from, &from_length); } else { PyErr_SetString( PyExc_TypeError, "Method unescape_bytea() expects a string as argument"); return NULL; } to = (char *) PQunescapeBytea((unsigned char*) from, &to_length); Py_XDECREF(tmp_obj); if (!to) return PyErr_NoMemory(); to_obj = PyBytes_FromStringAndSize(to, (Py_ssize_t) to_length); PQfreemem(to); return to_obj; } /* Set fixed datestyle. */ static char pg_set_datestyle__doc__[] = "set_datestyle(style) -- set which style is assumed"; static PyObject * pg_set_datestyle(PyObject *self, PyObject *args) { const char *datestyle = NULL; /* gets arguments */ if (!PyArg_ParseTuple(args, "z", &datestyle)) { PyErr_SetString( PyExc_TypeError, "Function set_datestyle() expects a string or None as argument"); return NULL; } date_format = datestyle ? date_style_to_format(datestyle) : NULL; Py_INCREF(Py_None); return Py_None; } /* Get fixed datestyle. */ static char pg_get_datestyle__doc__[] = "get_datestyle() -- get which date style is assumed"; static PyObject * pg_get_datestyle(PyObject *self, PyObject *noargs) { if (date_format) { return PyStr_FromString(date_format_to_style(date_format)); } else { Py_INCREF(Py_None); return Py_None; } } /* Get decimal point. */ static char pg_get_decimal_point__doc__[] = "get_decimal_point() -- get decimal point to be used for money values"; static PyObject * pg_get_decimal_point(PyObject *self, PyObject *noargs) { PyObject *ret; char s[2]; if (decimal_point) { s[0] = decimal_point; s[1] = '\0'; ret = PyStr_FromString(s); } else { Py_INCREF(Py_None); ret = Py_None; } return ret; } /* Set decimal point.
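
   Illustrative usage from Python (a sketch, not part of this file):

       import pg
       pg.set_decimal_point(',')   # e.g. for locales that write '3,50'
       pg.get_decimal_point()      # -> ','
       pg.set_decimal_point('.')   # restore the default

   Only a single punctuation character from a small allowed set is
   accepted; it is used when parsing money values in query results.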
*/ static char pg_set_decimal_point__doc__[] = "set_decimal_point(char) -- set decimal point to be used for money values"; static PyObject * pg_set_decimal_point(PyObject *self, PyObject *args) { PyObject *ret = NULL; char *s = NULL; /* gets arguments */ if (PyArg_ParseTuple(args, "z", &s)) { if (!s) s = "\0"; else if (*s && (*(s+1) || !strchr(".,;: '*/_`|", *s))) s = NULL; } if (s) { decimal_point = *s; Py_INCREF(Py_None); ret = Py_None; } else { PyErr_SetString(PyExc_TypeError, "Function set_decimal_point() expects" " a decimal mark character as argument"); } return ret; } /* Get decimal type. */ static char pg_get_decimal__doc__[] = "get_decimal() -- get the decimal type to be used for numeric values"; static PyObject * pg_get_decimal(PyObject *self, PyObject *noargs) { PyObject *ret; ret = decimal ? decimal : Py_None; Py_INCREF(ret); return ret; } /* Set decimal type. */ static char pg_set_decimal__doc__[] = "set_decimal(cls) -- set a decimal type to be used for numeric values"; static PyObject * pg_set_decimal(PyObject *self, PyObject *cls) { PyObject *ret = NULL; if (cls == Py_None) { Py_XDECREF(decimal); decimal = NULL; Py_INCREF(Py_None); ret = Py_None; } else if (PyCallable_Check(cls)) { Py_XINCREF(cls); Py_XDECREF(decimal); decimal = cls; Py_INCREF(Py_None); ret = Py_None; } else { PyErr_SetString(PyExc_TypeError, "Function set_decimal() expects" " a callable or None as argument"); } return ret; } /* Get usage of bool values. */ static char pg_get_bool__doc__[] = "get_bool() -- check whether boolean values are converted to bool"; static PyObject * pg_get_bool(PyObject *self, PyObject *noargs) { PyObject *ret; ret = bool_as_text ? Py_False : Py_True; Py_INCREF(ret); return ret; } /* Set usage of bool values. */ static char pg_set_bool__doc__[] = "set_bool(on) -- set whether boolean values should be converted to bool"; static PyObject * pg_set_bool(PyObject *self, PyObject *args) { PyObject *ret = NULL; int i; /* gets arguments */ if (PyArg_ParseTuple(args, "i", &i)) { bool_as_text = i ? 0 : 1; Py_INCREF(Py_None); ret = Py_None; } else { PyErr_SetString( PyExc_TypeError, "Function set_bool() expects a boolean value as argument"); } return ret; } /* Get conversion of arrays to lists. */ static char pg_get_array__doc__[] = "get_array() -- check whether arrays are converted as lists"; static PyObject * pg_get_array(PyObject *self, PyObject *noargs) { PyObject *ret; ret = array_as_text ? Py_False : Py_True; Py_INCREF(ret); return ret; } /* Set conversion of arrays to lists. */ static char pg_set_array__doc__[] = "set_array(on) -- set whether arrays should be converted to lists"; static PyObject * pg_set_array(PyObject* self, PyObject* args) { PyObject* ret = NULL; int i; /* gets arguments */ if (PyArg_ParseTuple(args, "i", &i)) { array_as_text = i ? 0 : 1; Py_INCREF(Py_None); ret = Py_None; } else { PyErr_SetString( PyExc_TypeError, "Function set_array() expects a boolean value as argument"); } return ret; } /* Check whether bytea values are unescaped. */ static char pg_get_bytea_escaped__doc__[] = "get_bytea_escaped() -- check whether bytea will be returned escaped"; static PyObject * pg_get_bytea_escaped(PyObject *self, PyObject *noargs) { PyObject *ret; ret = bytea_escaped ? Py_True : Py_False; Py_INCREF(ret); return ret; } /* Set whether bytea values are returned escaped.
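
   Illustrative usage from Python (a sketch): by default bytea values
   are returned unescaped as bytes; with the flag set, the raw escaped
   text is returned and unescaping is left to the caller:

       import pg
       pg.set_bytea_escaped(True)
       pg.get_bytea_escaped()    # -> True
       pg.set_bytea_escaped(False)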
*/ static char pg_set_bytea_escaped__doc__[] = "set_bytea_escaped(on) -- set whether bytea will be returned escaped"; static PyObject * pg_set_bytea_escaped(PyObject *self, PyObject *args) { PyObject *ret = NULL; int i; /* gets arguments */ if (PyArg_ParseTuple(args, "i", &i)) { bytea_escaped = i ? 1 : 0; Py_INCREF(Py_None); ret = Py_None; } else { PyErr_SetString(PyExc_TypeError, "Function set_bytea_escaped() expects" " a boolean value as argument"); } return ret; } /* set query helper functions (not part of public API) */ static char pg_set_query_helpers__doc__[] = "set_query_helpers(*helpers) -- set internal query helper functions"; static PyObject * pg_set_query_helpers(PyObject *self, PyObject *args) { /* gets arguments */ if (!PyArg_ParseTuple(args, "O!O!O!O!", &PyFunction_Type, &dictiter, &PyFunction_Type, &namediter, &PyFunction_Type, &namednext, &PyFunction_Type, &scalariter)) { return NULL; } Py_INCREF(Py_None); return Py_None; } /* Get json decode function. */ static char pg_get_jsondecode__doc__[] = "get_jsondecode() -- get the function used for decoding json results"; static PyObject * pg_get_jsondecode(PyObject *self, PyObject *noargs) { PyObject *ret; ret = jsondecode; if (!ret) ret = Py_None; Py_INCREF(ret); return ret; } /* Set json decode function. */ static char pg_set_jsondecode__doc__[] = "set_jsondecode(func) -- set a function to be used for decoding json results"; static PyObject * pg_set_jsondecode(PyObject *self, PyObject *func) { PyObject *ret = NULL; if (func == Py_None) { Py_XDECREF(jsondecode); jsondecode = NULL; Py_INCREF(Py_None); ret = Py_None; } else if (PyCallable_Check(func)) { Py_XINCREF(func); Py_XDECREF(jsondecode); jsondecode = func; Py_INCREF(Py_None); ret = Py_None; } else { PyErr_SetString(PyExc_TypeError, "Function set_jsondecode() expects" " a callable or None as argument"); } return ret; } #ifdef DEFAULT_VARS /* Get default host. */ static char pg_get_defhost__doc__[] = "get_defhost() -- return default database host"; static PyObject * pg_get_defhost(PyObject *self, PyObject *noargs) { Py_XINCREF(pg_default_host); return pg_default_host; } /* Set default host. */ static char pg_set_defhost__doc__[] = "set_defhost(string) -- set default database host and return previous value"; static PyObject * pg_set_defhost(PyObject *self, PyObject *args) { char *tmp = NULL; PyObject *old; /* gets arguments */ if (!PyArg_ParseTuple(args, "z", &tmp)) { PyErr_SetString( PyExc_TypeError, "Function set_defhost() expects a string or None as argument"); return NULL; } /* adjusts value */ old = pg_default_host; if (tmp) { pg_default_host = PyStr_FromString(tmp); } else { Py_INCREF(Py_None); pg_default_host = Py_None; } return old; } /* Get default database. */ static char pg_get_defbase__doc__[] = "get_defbase() -- return default database name"; static PyObject * pg_get_defbase(PyObject *self, PyObject *noargs) { Py_XINCREF(pg_default_base); return pg_default_base; } /* Set default database. */ static char pg_set_defbase__doc__[] = "set_defbase(string) -- set default database name and return previous value"; static PyObject * pg_set_defbase(PyObject *self, PyObject *args) { char *tmp = NULL; PyObject *old; /* gets arguments */ if (!PyArg_ParseTuple(args, "z", &tmp)) { PyErr_SetString( PyExc_TypeError, "Function set_defbase() expects a string or None as argument"); return NULL; } /* adjusts value */ old = pg_default_base; if (tmp) { pg_default_base = PyStr_FromString(tmp); } else { Py_INCREF(Py_None); pg_default_base = Py_None; } return old; } /* Get default options.
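
   The get_def*/set_def* functions below are only compiled in when
   DEFAULT_VARS is defined. An illustrative Python sketch (the host and
   database names are made up):

       import pg
       pg.set_defhost('db.example.com')  # used when connect() gets no host
       pg.set_defbase('testdb')          # returns the previous value
       con = pg.connect()                # falls back to these defaults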
*/ static char pg_get_defopt__doc__[] = "get_defopt() -- return default database options"; static PyObject * pg_get_defopt(PyObject *self, PyObject *noargs) { Py_XINCREF(pg_default_opt); return pg_default_opt; } /* Set default options. */ static char pg_set_defopt__doc__[] = "set_defopt(string) -- set default options and return previous value"; static PyObject * pg_setdefopt(PyObject *self, PyObject *args) { char *tmp = NULL; PyObject *old; /* gets arguments */ if (!PyArg_ParseTuple(args, "z", &tmp)) { PyErr_SetString( PyExc_TypeError, "Function set_defopt() expects a string or None as argument"); return NULL; } /* adjusts value */ old = pg_default_opt; if (tmp) { pg_default_opt = PyStr_FromString(tmp); } else { Py_INCREF(Py_None); pg_default_opt = Py_None; } return old; } /* Get default username. */ static char pg_get_defuser__doc__[] = "get_defuser() -- return default database username"; static PyObject * pg_get_defuser(PyObject *self, PyObject *noargs) { Py_XINCREF(pg_default_user); return pg_default_user; } /* Set default username. */ static char pg_set_defuser__doc__[] = "set_defuser(name) -- set default username and return previous value"; static PyObject * pg_set_defuser(PyObject *self, PyObject *args) { char *tmp = NULL; PyObject *old; /* gets arguments */ if (!PyArg_ParseTuple(args, "z", &tmp)) { PyErr_SetString( PyExc_TypeError, "Function set_defuser() expects a string or None as argument"); return NULL; } /* adjusts value */ old = pg_default_user; if (tmp) { pg_default_user = PyStr_FromString(tmp); } else { Py_INCREF(Py_None); pg_default_user = Py_None; } return old; } /* Set default password. */ static char pg_set_defpasswd__doc__[] = "set_defpasswd(password) -- set default database password"; static PyObject * pg_set_defpasswd(PyObject *self, PyObject *args) { char *tmp = NULL; /* gets arguments */ if (!PyArg_ParseTuple(args, "z", &tmp)) { PyErr_SetString( PyExc_TypeError, "Function set_defpasswd() expects a string or None as argument"); return NULL; } if (tmp) { pg_default_passwd = PyStr_FromString(tmp); } else { Py_INCREF(Py_None); pg_default_passwd = Py_None; } Py_INCREF(Py_None); return Py_None; } /* Get default port. */ static char pg_get_defport__doc__[] = "get_defport() -- return default database port"; static PyObject * pg_get_defport(PyObject *self, PyObject *noargs) { Py_XINCREF(pg_default_port); return pg_default_port; } /* Set default port. */ static char pg_set_defport__doc__[] = "set_defport(port) -- set default port and return previous value"; static PyObject * pg_set_defport(PyObject *self, PyObject *args) { long int port = -2; PyObject *old; /* gets arguments */ if ((!PyArg_ParseTuple(args, "l", &port)) || (port < -1)) { PyErr_SetString(PyExc_TypeError, "Function set_defport() expects" " a positive integer or -1 as argument"); return NULL; } /* adjusts value */ old = pg_default_port; if (port != -1) { pg_default_port = PyInt_FromLong(port); } else { Py_INCREF(Py_None); pg_default_port = Py_None; } return old; } #endif /* DEFAULT_VARS */ /* Cast a string with a text representation of an array to a list.
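
   Illustrative usage from Python (a sketch, not part of this file):

       import pg
       pg.cast_array('{1,2,3}', int)        # -> [1, 2, 3]
       pg.cast_array('{{a,b},{c,d}}')       # -> [['a', 'b'], ['c', 'd']]
       pg.cast_array('{1;2;3}', int, b';')  # with a custom delimiter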
*/ static char pg_cast_array__doc__[] = "cast_array(string, cast=None, delim=',') -- cast a string as an array"; PyObject * pg_cast_array(PyObject *self, PyObject *args, PyObject *dict) { static const char *kwlist[] = {"string", "cast", "delim", NULL}; PyObject *string_obj, *cast_obj = NULL, *ret; char *string, delim = ','; Py_ssize_t size; int encoding; if (!PyArg_ParseTupleAndKeywords( args, dict, "O|Oc", (char**) kwlist, &string_obj, &cast_obj, &delim)) { return NULL; } if (PyBytes_Check(string_obj)) { PyBytes_AsStringAndSize(string_obj, &string, &size); string_obj = NULL; encoding = pg_encoding_ascii; } else if (PyUnicode_Check(string_obj)) { string_obj = PyUnicode_AsUTF8String(string_obj); if (!string_obj) return NULL; /* pass the UnicodeEncodeError */ PyBytes_AsStringAndSize(string_obj, &string, &size); encoding = pg_encoding_utf8; } else { PyErr_SetString( PyExc_TypeError, "Function cast_array() expects a string as first argument"); return NULL; } if (cast_obj == Py_None) { /* cast_obj is a borrowed reference, so don't decrement it */ cast_obj = NULL; } else if (cast_obj && !PyCallable_Check(cast_obj)) { PyErr_SetString( PyExc_TypeError, "Function cast_array() expects a callable as second argument"); return NULL; } ret = cast_array(string, size, encoding, 0, cast_obj, delim); Py_XDECREF(string_obj); return ret; } /* Cast a string with a text representation of a record to a tuple. */ static char pg_cast_record__doc__[] = "cast_record(string, cast=None, delim=',') -- cast a string as a record"; PyObject * pg_cast_record(PyObject *self, PyObject *args, PyObject *dict) { static const char *kwlist[] = {"string", "cast", "delim", NULL}; PyObject *string_obj, *cast_obj = NULL, *ret; char *string, delim = ','; Py_ssize_t size, len; int encoding; if (!PyArg_ParseTupleAndKeywords( args, dict, "O|Oc", (char**) kwlist, &string_obj, &cast_obj, &delim)) { return NULL; } if (PyBytes_Check(string_obj)) { PyBytes_AsStringAndSize(string_obj, &string, &size); string_obj = NULL; encoding = pg_encoding_ascii; } else if (PyUnicode_Check(string_obj)) { string_obj = PyUnicode_AsUTF8String(string_obj); if (!string_obj) return NULL; /* pass the UnicodeEncodeError */ PyBytes_AsStringAndSize(string_obj, &string, &size); encoding = pg_encoding_utf8; } else { PyErr_SetString( PyExc_TypeError, "Function cast_record() expects a string as first argument"); return NULL; } if (!cast_obj || PyCallable_Check(cast_obj)) { len = 0; } else if (cast_obj == Py_None) { /* borrowed reference, don't decrement it */ cast_obj = NULL; len = 0; } else if (PyTuple_Check(cast_obj) || PyList_Check(cast_obj)) { len = PySequence_Size(cast_obj); if (!len) { cast_obj = NULL; /* borrowed reference, don't decrement it */ } } else { PyErr_SetString(PyExc_TypeError, "Function cast_record() expects a callable" " or tuple or list of callables as second argument"); return NULL; } ret = cast_record(string, size, encoding, 0, cast_obj, len, delim); Py_XDECREF(string_obj); return ret; } /* Cast a string with a text representation of an hstore to a dict.
*/ static char pg_cast_hstore__doc__[] = "cast_hstore(string) -- cast a string as an hstore"; PyObject * pg_cast_hstore(PyObject *self, PyObject *string) { PyObject *tmp_obj = NULL, *ret; char *s; Py_ssize_t size; int encoding; if (PyBytes_Check(string)) { PyBytes_AsStringAndSize(string, &s, &size); encoding = pg_encoding_ascii; } else if (PyUnicode_Check(string)) { tmp_obj = PyUnicode_AsUTF8String(string); if (!tmp_obj) return NULL; /* pass the UnicodeEncodeError */ PyBytes_AsStringAndSize(tmp_obj, &s, &size); encoding = pg_encoding_utf8; } else { PyErr_SetString( PyExc_TypeError, "Function cast_hstore() expects a string as first argument"); return NULL; } ret = cast_hstore(s, size, encoding); Py_XDECREF(tmp_obj); return ret; } /* The list of functions defined in the module */ static struct PyMethodDef pg_methods[] = { {"connect", (PyCFunction) pg_connect, METH_VARARGS|METH_KEYWORDS, pg_connect__doc__}, {"escape_string", (PyCFunction) pg_escape_string, METH_O, pg_escape_string__doc__}, {"escape_bytea", (PyCFunction) pg_escape_bytea, METH_O, pg_escape_bytea__doc__}, {"unescape_bytea", (PyCFunction) pg_unescape_bytea, METH_O, pg_unescape_bytea__doc__}, {"get_datestyle", (PyCFunction) pg_get_datestyle, METH_NOARGS, pg_get_datestyle__doc__}, {"set_datestyle", (PyCFunction) pg_set_datestyle, METH_VARARGS, pg_set_datestyle__doc__}, {"get_decimal_point", (PyCFunction) pg_get_decimal_point, METH_NOARGS, pg_get_decimal_point__doc__}, {"set_decimal_point", (PyCFunction) pg_set_decimal_point, METH_VARARGS, pg_set_decimal_point__doc__}, {"get_decimal", (PyCFunction) pg_get_decimal, METH_NOARGS, pg_get_decimal__doc__}, {"set_decimal", (PyCFunction) pg_set_decimal, METH_O, pg_set_decimal__doc__}, {"get_bool", (PyCFunction) pg_get_bool, METH_NOARGS, pg_get_bool__doc__}, {"set_bool", (PyCFunction) pg_set_bool, METH_VARARGS, pg_set_bool__doc__}, {"get_array", (PyCFunction) pg_get_array, METH_NOARGS, pg_get_array__doc__}, {"set_array", (PyCFunction) pg_set_array, METH_VARARGS, pg_set_array__doc__}, {"set_query_helpers", (PyCFunction) pg_set_query_helpers, METH_VARARGS, pg_set_query_helpers__doc__}, {"get_bytea_escaped", (PyCFunction) pg_get_bytea_escaped, METH_NOARGS, pg_get_bytea_escaped__doc__}, {"set_bytea_escaped", (PyCFunction) pg_set_bytea_escaped, METH_VARARGS, pg_set_bytea_escaped__doc__}, {"get_jsondecode", (PyCFunction) pg_get_jsondecode, METH_NOARGS, pg_get_jsondecode__doc__}, {"set_jsondecode", (PyCFunction) pg_set_jsondecode, METH_O, pg_set_jsondecode__doc__}, {"cast_array", (PyCFunction) pg_cast_array, METH_VARARGS|METH_KEYWORDS, pg_cast_array__doc__}, {"cast_record", (PyCFunction) pg_cast_record, METH_VARARGS|METH_KEYWORDS, pg_cast_record__doc__}, {"cast_hstore", (PyCFunction) pg_cast_hstore, METH_O, pg_cast_hstore__doc__}, #ifdef DEFAULT_VARS {"get_defhost", pg_get_defhost, METH_NOARGS, pg_get_defhost__doc__}, {"set_defhost", pg_set_defhost, METH_VARARGS, pg_set_defhost__doc__}, {"get_defbase", pg_get_defbase, METH_NOARGS, pg_get_defbase__doc__}, {"set_defbase", pg_set_defbase, METH_VARARGS, pg_set_defbase__doc__}, {"get_defopt", pg_get_defopt, METH_NOARGS, pg_get_defopt__doc__}, {"set_defopt", pg_setdefopt, METH_VARARGS, pg_set_defopt__doc__}, {"get_defport", pg_get_defport, METH_NOARGS, pg_get_defport__doc__}, {"set_defport", pg_set_defport, METH_VARARGS, pg_set_defport__doc__}, {"get_defuser", pg_get_defuser, METH_NOARGS, pg_get_defuser__doc__}, {"set_defuser", pg_set_defuser, METH_VARARGS, pg_set_defuser__doc__}, {"set_defpasswd", pg_set_defpasswd, METH_VARARGS, 
pg_set_defpasswd__doc__}, #endif /* DEFAULT_VARS */ {NULL, NULL} /* sentinel */ }; static char pg__doc__[] = "Python interface to PostgreSQL DB"; static struct PyModuleDef moduleDef = { PyModuleDef_HEAD_INIT, "_pg", /* m_name */ pg__doc__, /* m_doc */ -1, /* m_size */ pg_methods /* m_methods */ }; /* Initialization function for the module */ MODULE_INIT_FUNC(_pg) { PyObject *mod, *dict, *s; /* Create the module and add the functions */ mod = PyModule_Create(&moduleDef); /* Initialize here because some Windows platforms get confused otherwise */ #if IS_PY3 connType.tp_base = noticeType.tp_base = queryType.tp_base = sourceType.tp_base = &PyBaseObject_Type; #ifdef LARGE_OBJECTS largeType.tp_base = &PyBaseObject_Type; #endif #else connType.ob_type = noticeType.ob_type = queryType.ob_type = sourceType.ob_type = &PyType_Type; #ifdef LARGE_OBJECTS largeType.ob_type = &PyType_Type; #endif #endif if (PyType_Ready(&connType) || PyType_Ready(¬iceType) || PyType_Ready(&queryType) || PyType_Ready(&sourceType) #ifdef LARGE_OBJECTS || PyType_Ready(&largeType) #endif ) { return NULL; } dict = PyModule_GetDict(mod); /* Exceptions as defined by DB-API 2.0 */ Error = PyErr_NewException("pg.Error", PyExc_Exception, NULL); PyDict_SetItemString(dict, "Error", Error); Warning = PyErr_NewException("pg.Warning", PyExc_Exception, NULL); PyDict_SetItemString(dict, "Warning", Warning); InterfaceError = PyErr_NewException( "pg.InterfaceError", Error, NULL); PyDict_SetItemString(dict, "InterfaceError", InterfaceError); DatabaseError = PyErr_NewException( "pg.DatabaseError", Error, NULL); PyDict_SetItemString(dict, "DatabaseError", DatabaseError); InternalError = PyErr_NewException( "pg.InternalError", DatabaseError, NULL); PyDict_SetItemString(dict, "InternalError", InternalError); OperationalError = PyErr_NewException( "pg.OperationalError", DatabaseError, NULL); PyDict_SetItemString(dict, "OperationalError", OperationalError); ProgrammingError = PyErr_NewException( "pg.ProgrammingError", DatabaseError, NULL); PyDict_SetItemString(dict, "ProgrammingError", ProgrammingError); IntegrityError = PyErr_NewException( "pg.IntegrityError", DatabaseError, NULL); PyDict_SetItemString(dict, "IntegrityError", IntegrityError); DataError = PyErr_NewException( "pg.DataError", DatabaseError, NULL); PyDict_SetItemString(dict, "DataError", DataError); NotSupportedError = PyErr_NewException( "pg.NotSupportedError", DatabaseError, NULL); PyDict_SetItemString(dict, "NotSupportedError", NotSupportedError); InvalidResultError = PyErr_NewException( "pg.InvalidResultError", DataError, NULL); PyDict_SetItemString(dict, "InvalidResultError", InvalidResultError); NoResultError = PyErr_NewException( "pg.NoResultError", InvalidResultError, NULL); PyDict_SetItemString(dict, "NoResultError", NoResultError); MultipleResultsError = PyErr_NewException( "pg.MultipleResultsError", InvalidResultError, NULL); PyDict_SetItemString(dict, "MultipleResultsError", MultipleResultsError); /* Make the version available */ s = PyStr_FromString(PyPgVersion); PyDict_SetItemString(dict, "version", s); PyDict_SetItemString(dict, "__version__", s); Py_DECREF(s); /* Result types for queries */ PyDict_SetItemString(dict, "RESULT_EMPTY", PyInt_FromLong(RESULT_EMPTY)); PyDict_SetItemString(dict, "RESULT_DML", PyInt_FromLong(RESULT_DML)); PyDict_SetItemString(dict, "RESULT_DDL", PyInt_FromLong(RESULT_DDL)); PyDict_SetItemString(dict, "RESULT_DQL", PyInt_FromLong(RESULT_DQL)); /* Transaction states */ PyDict_SetItemString(dict,"TRANS_IDLE",PyInt_FromLong(PQTRANS_IDLE)); 
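/*
 * The transaction state constants registered here are the values
 * returned by the connection method transaction(). An illustrative
 * Python sketch (assumes a reachable test database 'testdb'):
 *
 *     import pg
 *     con = pg.connect('testdb')
 *     con.query('begin')
 *     con.transaction() == pg.TRANS_INTRANS   # -> True
 */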
PyDict_SetItemString(dict,"TRANS_ACTIVE",PyInt_FromLong(PQTRANS_ACTIVE)); PyDict_SetItemString(dict,"TRANS_INTRANS",PyInt_FromLong(PQTRANS_INTRANS)); PyDict_SetItemString(dict,"TRANS_INERROR",PyInt_FromLong(PQTRANS_INERROR)); PyDict_SetItemString(dict,"TRANS_UNKNOWN",PyInt_FromLong(PQTRANS_UNKNOWN)); #ifdef LARGE_OBJECTS /* Create mode for large objects */ PyDict_SetItemString(dict, "INV_READ", PyInt_FromLong(INV_READ)); PyDict_SetItemString(dict, "INV_WRITE", PyInt_FromLong(INV_WRITE)); /* Position flags for lo_lseek */ PyDict_SetItemString(dict, "SEEK_SET", PyInt_FromLong(SEEK_SET)); PyDict_SetItemString(dict, "SEEK_CUR", PyInt_FromLong(SEEK_CUR)); PyDict_SetItemString(dict, "SEEK_END", PyInt_FromLong(SEEK_END)); #endif /* LARGE_OBJECTS */ #ifdef DEFAULT_VARS /* Prepare default values */ Py_INCREF(Py_None); pg_default_host = Py_None; Py_INCREF(Py_None); pg_default_base = Py_None; Py_INCREF(Py_None); pg_default_opt = Py_None; Py_INCREF(Py_None); pg_default_port = Py_None; Py_INCREF(Py_None); pg_default_user = Py_None; Py_INCREF(Py_None); pg_default_passwd = Py_None; #endif /* DEFAULT_VARS */ /* Store common pg encoding ids */ pg_encoding_utf8 = pg_char_to_encoding("UTF8"); pg_encoding_latin1 = pg_char_to_encoding("LATIN1"); pg_encoding_ascii = pg_char_to_encoding("SQL_ASCII"); /* Check for errors */ if (PyErr_Occurred()) { return NULL; } return mod; } pygresql-5.1.2/pgnotice.c000066400000000000000000000076041365010227600153520ustar00rootroot00000000000000/* * PyGreSQL - a Python interface for the PostgreSQL database. * * The notice object - this file is part a of the C extension module. * * Copyright (c) 2020 by the PyGreSQL Development Team * * Please see the LICENSE.TXT file for specific restrictions. */ /* Get notice object attributes. */ static PyObject * notice_getattr(noticeObject *self, PyObject *nameobj) { PGresult const *res = self->res; const char *name = PyStr_AsString(nameobj); int fieldcode; if (!res) { PyErr_SetString(PyExc_TypeError, "Cannot get current notice"); return NULL; } /* pg connection object */ if (!strcmp(name, "pgcnx")) { if (self->pgcnx && _check_cnx_obj(self->pgcnx)) { Py_INCREF(self->pgcnx); return (PyObject *) self->pgcnx; } else { Py_INCREF(Py_None); return Py_None; } } /* full message */ if (!strcmp(name, "message")) { return PyStr_FromString(PQresultErrorMessage(res)); } /* other possible fields */ fieldcode = 0; if (!strcmp(name, "severity")) fieldcode = PG_DIAG_SEVERITY; else if (!strcmp(name, "primary")) fieldcode = PG_DIAG_MESSAGE_PRIMARY; else if (!strcmp(name, "detail")) fieldcode = PG_DIAG_MESSAGE_DETAIL; else if (!strcmp(name, "hint")) fieldcode = PG_DIAG_MESSAGE_HINT; if (fieldcode) { char *s = PQresultErrorField(res, fieldcode); if (s) { return PyStr_FromString(s); } else { Py_INCREF(Py_None); return Py_None; } } return PyObject_GenericGetAttr((PyObject *) self, nameobj); } /* Get the list of notice attributes. */ static PyObject * notice_dir(noticeObject *self, PyObject *noargs) { PyObject *attrs; attrs = PyObject_Dir(PyObject_Type((PyObject *) self)); PyObject_CallMethod( attrs, "extend", "[ssssss]", "pgcnx", "severity", "message", "primary", "detail", "hint"); return attrs; } /* Return notice as string in human readable form. 
*/ static PyObject * notice_str(noticeObject *self) { /* notice_getattr expects a str object, and the temporary name object must not be leaked */ PyObject *nameobj = PyStr_FromString("message"); PyObject *ret = notice_getattr(self, nameobj); Py_DECREF(nameobj); return ret; } /* Notice object methods */ static struct PyMethodDef notice_methods[] = { {"__dir__", (PyCFunction) notice_dir, METH_NOARGS, NULL}, {NULL, NULL} }; static char notice__doc__[] = "PostgreSQL notice object"; /* Notice type definition */ static PyTypeObject noticeType = { PyVarObject_HEAD_INIT(NULL, 0) "pg.Notice", /* tp_name */ sizeof(noticeObject), /* tp_basicsize */ 0, /* tp_itemsize */ /* methods */ 0, /* tp_dealloc */ 0, /* tp_print */ 0, /* tp_getattr */ 0, /* tp_setattr */ 0, /* tp_compare */ 0, /* tp_repr */ 0, /* tp_as_number */ 0, /* tp_as_sequence */ 0, /* tp_as_mapping */ 0, /* tp_hash */ 0, /* tp_call */ (reprfunc) notice_str, /* tp_str */ (getattrofunc) notice_getattr, /* tp_getattro */ PyObject_GenericSetAttr, /* tp_setattro */ 0, /* tp_as_buffer */ Py_TPFLAGS_DEFAULT, /* tp_flags */ notice__doc__, /* tp_doc */ 0, /* tp_traverse */ 0, /* tp_clear */ 0, /* tp_richcompare */ 0, /* tp_weaklistoffset */ 0, /* tp_iter */ 0, /* tp_iternext */ notice_methods, /* tp_methods */ }; pygresql-5.1.2/pgquery.c000066400000000000000000000537421365010227600152400ustar00rootroot00000000000000/* * PyGreSQL - a Python interface for the PostgreSQL database. * * The query object - this file is part of the C extension module. * * Copyright (c) 2020 by the PyGreSQL Development Team * * Please see the LICENSE.TXT file for specific restrictions. */ /* Deallocate the query object. */ static void query_dealloc(queryObject *self) { Py_XDECREF(self->pgcnx); if (self->col_types) { PyMem_Free(self->col_types); } if (self->result) { PQclear(self->result); } PyObject_Del(self); } /* Return query as string in human readable form. */ static PyObject * query_str(queryObject *self) { return format_result(self->result); } /* Return length of a query object. */ static Py_ssize_t query_len(PyObject *self) { PyObject *tmp; Py_ssize_t len; tmp = PyLong_FromLong(((queryObject*) self)->max_row); len = PyLong_AsSsize_t(tmp); Py_DECREF(tmp); return len; } /* Return the value in the given column of the current row. */ static PyObject * _query_value_in_column(queryObject *self, int column) { char *s; int type; if (PQgetisnull(self->result, self->current_row, column)) { Py_INCREF(Py_None); return Py_None; } /* get the string representation of the value */ /* note: this is always null-terminated text format */ s = PQgetvalue(self->result, self->current_row, column); /* get the PyGreSQL type of the column */ type = self->col_types[column]; /* cast the string representation into a Python object */ if (type & PYGRES_ARRAY) return cast_array(s, PQgetlength(self->result, self->current_row, column), self->encoding, type, NULL, 0); if (type == PYGRES_BYTEA) return cast_bytea_text(s); if (type == PYGRES_OTHER) return cast_other(s, PQgetlength(self->result, self->current_row, column), self->encoding, PQftype(self->result, column), self->pgcnx->cast_hook); if (type & PYGRES_TEXT) return cast_sized_text(s, PQgetlength(self->result, self->current_row, column), self->encoding, type); return cast_unsized_simple(s, type); } /* Return the current row as a tuple.
*/ static PyObject * _query_row_as_tuple(queryObject *self) { PyObject *row_tuple = NULL; int j; if (!(row_tuple = PyTuple_New(self->num_fields))) { return NULL; } for (j = 0; j < self->num_fields; ++j) { PyObject *val = _query_value_in_column(self, j); if (!val) { Py_DECREF(row_tuple); return NULL; } PyTuple_SET_ITEM(row_tuple, j, val); } return row_tuple; } /* Return given item from a query object. */ static PyObject * query_getitem(PyObject *self, Py_ssize_t i) { queryObject *q = (queryObject *) self; PyObject *tmp; long row; tmp = PyLong_FromSize_t((size_t) i); row = PyLong_AsLong(tmp); Py_DECREF(tmp); if (row < 0 || row >= q->max_row) { PyErr_SetNone(PyExc_IndexError); return NULL; } q->current_row = (int) row; return _query_row_as_tuple(q); } /* __iter__() method of the queryObject: Returns the default iterator yielding rows as tuples. */ static PyObject* query_iter(queryObject *self) { self->current_row = 0; Py_INCREF(self); return (PyObject*) self; } /* __next__() method of the queryObject: Returns the current row as a tuple and moves to the next one. */ static PyObject * query_next(queryObject *self, PyObject *noargs) { PyObject *row_tuple = NULL; if (self->current_row >= self->max_row) { PyErr_SetNone(PyExc_StopIteration); return NULL; } row_tuple = _query_row_as_tuple(self); if (row_tuple) ++self->current_row; return row_tuple; } /* Get number of rows. */ static char query_ntuples__doc__[] = "ntuples() -- return number of tuples returned by query"; static PyObject * query_ntuples(queryObject *self, PyObject *noargs) { return PyInt_FromLong(self->max_row); } /* List field names from query result. */ static char query_listfields__doc__[] = "listfields() -- List field names from result"; static PyObject * query_listfields(queryObject *self, PyObject *noargs) { int i; char *name; PyObject *fieldstuple, *str; /* builds tuple */ fieldstuple = PyTuple_New(self->num_fields); if (fieldstuple) { for (i = 0; i < self->num_fields; ++i) { name = PQfname(self->result, i); str = PyStr_FromString(name); PyTuple_SET_ITEM(fieldstuple, i, str); } } return fieldstuple; } /* Get field name from number in last result. */ static char query_fieldname__doc__[] = "fieldname(num) -- return name of field from result from its position"; static PyObject * query_fieldname(queryObject *self, PyObject *args) { int i; char *name; /* gets args */ if (!PyArg_ParseTuple(args, "i", &i)) { PyErr_SetString(PyExc_TypeError, "Method fieldname() takes an integer as argument"); return NULL; } /* checks number validity */ if (i >= self->num_fields) { PyErr_SetString(PyExc_ValueError, "Invalid field number"); return NULL; } /* gets fields name and builds object */ name = PQfname(self->result, i); return PyStr_FromString(name); } /* Get field number from name in last result. */ static char query_fieldnum__doc__[] = "fieldnum(name) -- return position in query for field from its name"; static PyObject * query_fieldnum(queryObject *self, PyObject *args) { int num; char *name; /* gets args */ if (!PyArg_ParseTuple(args, "s", &name)) { PyErr_SetString(PyExc_TypeError, "Method fieldnum() takes a string as argument"); return NULL; } /* gets field number */ if ((num = PQfnumber(self->result, name)) == -1) { PyErr_SetString(PyExc_ValueError, "Unknown field"); return NULL; } return PyInt_FromLong(num); } /* Retrieve one row from the result as a tuple.
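
   Illustrative usage from Python (a sketch; 'con' is an assumed open
   connection):

       q = con.query('select 1 as a, 2 as b')
       q.one()   # -> (1, 2)
       q.one()   # -> None, all rows have been consumed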
*/ static char query_one__doc__[] = "one() -- Get one row from the result of a query\n\n" "Only one row from the result is returned as a tuple of fields.\n" "This method can be called multiple times to return more rows.\n" "It returns None if the result does not contain one more row.\n"; static PyObject * query_one(queryObject *self, PyObject *noargs) { PyObject *row_tuple; if (self->current_row >= self->max_row) { Py_INCREF(Py_None); return Py_None; } row_tuple = _query_row_as_tuple(self); if (row_tuple) ++self->current_row; return row_tuple; } /* Retrieve the single row from the result as a tuple. */ static char query_single__doc__[] = "single() -- Get the result of a query as single row\n\n" "The single row from the query result is returned as a tuple of fields.\n" "This method returns the same single row when called multiple times.\n" "It raises an InvalidResultError if the result doesn't have exactly one row,\n" "which will be of type NoResultError or MultipleResultsError specifically.\n"; static PyObject * query_single(queryObject *self, PyObject *noargs) { PyObject *row_tuple; if (self->max_row != 1) { if (self->max_row) set_error_msg(MultipleResultsError, "Multiple results found"); else set_error_msg(NoResultError, "No result found"); return NULL; } self->current_row = 0; row_tuple = _query_row_as_tuple(self); if (row_tuple) ++self->current_row; return row_tuple; } /* Retrieve the last query result as a list of tuples. */ static char query_getresult__doc__[] = "getresult() -- Get the result of a query\n\n" "The result is returned as a list of rows, each one a tuple of fields\n" "in the order returned by the server.\n"; static PyObject * query_getresult(queryObject *self, PyObject *noargs) { PyObject *result_list; int i; if (!(result_list = PyList_New(self->max_row))) { return NULL; } for (i = self->current_row = 0; i < self->max_row; ++i) { PyObject *row_tuple = query_next(self, noargs); if (!row_tuple) { Py_DECREF(result_list); return NULL; } PyList_SET_ITEM(result_list, i, row_tuple); } return result_list; } /* Return the current row as a dict. */ static PyObject * _query_row_as_dict(queryObject *self) { PyObject *row_dict = NULL; int j; if (!(row_dict = PyDict_New())) { return NULL; } for (j = 0; j < self->num_fields; ++j) { PyObject *val = _query_value_in_column(self, j); if (!val) { Py_DECREF(row_dict); return NULL; } PyDict_SetItemString(row_dict, PQfname(self->result, j), val); Py_DECREF(val); } return row_dict; } /* Return the current row as a dict and move to the next one. */ static PyObject * query_next_dict(queryObject *self, PyObject *noargs) { PyObject *row_dict = NULL; if (self->current_row >= self->max_row) { PyErr_SetNone(PyExc_StopIteration); return NULL; } row_dict = _query_row_as_dict(self); if (row_dict) ++self->current_row; return row_dict; } /* Retrieve one row from the result as a dictionary. */ static char query_onedict__doc__[] = "onedict() -- Get one row from the result of a query\n\n" "Only one row from the result is returned as a dictionary with\n" "the field names used as the keys.\n" "This method can be called multiple times to return more rows.\n" "It returns None if the result does not contain one more row.\n"; static PyObject * query_onedict(queryObject *self, PyObject *noargs) { PyObject *row_dict; if (self->current_row >= self->max_row) { Py_INCREF(Py_None); return Py_None; } row_dict = _query_row_as_dict(self); if (row_dict) ++self->current_row; return row_dict; } /* Retrieve the single row from the result as a dictionary.
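
   Illustrative usage from Python (a sketch; 'con' as above):

       con.query('select 1 as a').singledict()   # -> {'a': 1}
       con.query('select 1 union select 2').singledict()
       # raises pg.MultipleResultsError; an empty result would raise
       # pg.NoResultError instead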
*/ static char query_singledict__doc__[] = "singledict() -- Get the result of a query as single row\n\n" "The single row from the query result is returned as a dictionary with\n" "the field names used as the keys.\n" "This method returns the same single row when called multiple times.\n" "It raises an InvalidResultError if the result doesn't have exactly one row,\n" "which will be of type NoResultError or MultipleResultsError specifically.\n"; static PyObject * query_singledict(queryObject *self, PyObject *noargs) { PyObject *row_dict; if (self->max_row != 1) { if (self->max_row) set_error_msg(MultipleResultsError, "Multiple results found"); else set_error_msg(NoResultError, "No result found"); return NULL; } self->current_row = 0; row_dict = _query_row_as_dict(self); if (row_dict) ++self->current_row; return row_dict; } /* Retrieve the last query result as a list of dictionaries. */ static char query_dictresult__doc__[] = "dictresult() -- Get the result of a query\n\n" "The result is returned as a list of rows, each one a dictionary with\n" "the field names used as the keys.\n"; static PyObject * query_dictresult(queryObject *self, PyObject *noargs) { PyObject *result_list; int i; if (!(result_list = PyList_New(self->max_row))) { return NULL; } for (i = self->current_row = 0; i < self->max_row; ++i) { PyObject *row_dict = query_next_dict(self, noargs); if (!row_dict) { Py_DECREF(result_list); return NULL; } PyList_SET_ITEM(result_list, i, row_dict); } return result_list; } /* Retrieve last result as iterator of dictionaries. */ static char query_dictiter__doc__[] = "dictiter() -- Get the result of a query\n\n" "The result is returned as an iterator of rows, each one a dictionary\n" "with the field names used as the keys.\n"; static PyObject * query_dictiter(queryObject *self, PyObject *noargs) { if (!dictiter) { return query_dictresult(self, noargs); } return PyObject_CallFunction(dictiter, "(O)", self); } /* Retrieve one row from the result as a named tuple. */ static char query_onenamed__doc__[] = "onenamed() -- Get one row from the result of a query\n\n" "Only one row from the result is returned as a named tuple of fields.\n" "This method can be called multiple times to return more rows.\n" "It returns None if the result does not contain one more row.\n"; static PyObject * query_onenamed(queryObject *self, PyObject *noargs) { if (!namednext) { return query_one(self, noargs); } if (self->current_row >= self->max_row) { Py_INCREF(Py_None); return Py_None; } return PyObject_CallFunction(namednext, "(O)", self); } /* Retrieve the single row from the result as a named tuple. */ static char query_singlenamed__doc__[] = "singlenamed() -- Get the result of a query as single row\n\n" "The single row from the query result is returned as named tuple of fields.\n" "This method returns the same single row when called multiple times.\n" "It raises an InvalidResultError if the result doesn't have exactly one row,\n" "which will be of type NoResultError or MultipleResultsError specifically.\n"; static PyObject * query_singlenamed(queryObject *self, PyObject *noargs) { if (!namednext) { return query_single(self, noargs); } if (self->max_row != 1) { if (self->max_row) set_error_msg(MultipleResultsError, "Multiple results found"); else set_error_msg(NoResultError, "No result found"); return NULL; } self->current_row = 0; return PyObject_CallFunction(namednext, "(O)", self); } /* Retrieve last result as list of named tuples.
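
   Illustrative usage from Python (a sketch; 'con' as above):

       row = con.query('select 1 as a, 2 as b').namedresult()[0]
       row.a, row.b   # -> (1, 2)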
*/ static char query_namedresult__doc__[] = "namedresult() -- Get the result of a query\n\n" "The result is returned as a list of rows, each one a named tuple of fields\n" "in the order returned by the server.\n"; static PyObject * query_namedresult(queryObject *self, PyObject *noargs) { PyObject *res, *res_list; if (!namediter) { return query_getresult(self, noargs); } res = PyObject_CallFunction(namediter, "(O)", self); if (!res) return NULL; if (PyList_Check(res)) return res; res_list = PySequence_List(res); Py_DECREF(res); return res_list; } /* Retrieve last result as iterator of named tuples. */ static char query_namediter__doc__[] = "namediter() -- Get the result of a query\n\n" "The result is returned as an iterator of rows, each one a named tuple\n" "of fields in the order returned by the server.\n"; static PyObject * query_namediter(queryObject *self, PyObject *noargs) { PyObject *res, *res_iter; if (!namediter) { return query_iter(self); } res = PyObject_CallFunction(namediter, "(O)", self); if (!res) return NULL; if (!PyList_Check(res)) return res; res_iter = (Py_TYPE(res)->tp_iter)((PyObject *) res); /* iterate over the list just built, not over self */ Py_DECREF(res); return res_iter; } /* Retrieve the last query result as a list of scalar values. */ static char query_scalarresult__doc__[] = "scalarresult() -- Get query result as scalars\n\n" "The result is returned as a list of scalar values where the values\n" "are the first fields of the rows in the order returned by the server.\n"; static PyObject * query_scalarresult(queryObject *self, PyObject *noargs) { PyObject *result_list; if (!self->num_fields) { set_error_msg(ProgrammingError, "No fields in result"); return NULL; } if (!(result_list = PyList_New(self->max_row))) { return NULL; } for (self->current_row = 0; self->current_row < self->max_row; ++self->current_row) { PyObject *value = _query_value_in_column(self, 0); if (!value) { Py_DECREF(result_list); return NULL; } PyList_SET_ITEM(result_list, self->current_row, value); } return result_list; } /* Retrieve the last query result as iterator of scalar values. */ static char query_scalariter__doc__[] = "scalariter() -- Get query result as scalars\n\n" "The result is returned as an iterator of scalar values where the values\n" "are the first fields of the rows in the order returned by the server.\n"; static PyObject * query_scalariter(queryObject *self, PyObject *noargs) { if (!scalariter) { return query_scalarresult(self, noargs); } if (!self->num_fields) { set_error_msg(ProgrammingError, "No fields in result"); return NULL; } return PyObject_CallFunction(scalariter, "(O)", self); } /* Retrieve one result as scalar value. */ static char query_onescalar__doc__[] = "onescalar() -- Get one scalar value from the result of a query\n\n" "Returns the first field of the next row from the result as a scalar value.\n" "This method can be called multiple times to return more rows as scalars.\n" "It returns None if the result does not contain one more row.\n"; static PyObject * query_onescalar(queryObject *self, PyObject *noargs) { PyObject *value; if (!self->num_fields) { set_error_msg(ProgrammingError, "No fields in result"); return NULL; } if (self->current_row >= self->max_row) { Py_INCREF(Py_None); return Py_None; } value = _query_value_in_column(self, 0); if (value) ++self->current_row; return value; } /* Retrieve the single row from the result as a scalar value.
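
   Illustrative usage from Python (a sketch; 'con' as above, the count
   is of course made up):

       con.query('select count(*) from pg_class').singlescalar()
       # -> e.g. 387, the bare count as a Python int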
*/ static char query_singlescalar__doc__[] = "singlescalar() -- Get scalar value from single result of a query\n\n" "Returns the first field of the single row from the result as a scalar value.\n" "This method returns the same single row when called multiple times.\n" "It raises an InvalidResultError if the result doesn't have exactly one row,\n" "which will be of type NoResultError or MultipleResultsError specifically.\n"; static PyObject * query_singlescalar(queryObject *self, PyObject *noargs) { PyObject *value; if (!self->num_fields) { set_error_msg(ProgrammingError, "No fields in result"); return NULL; } if (self->max_row != 1) { if (self->max_row) set_error_msg(MultipleResultsError, "Multiple results found"); else set_error_msg(NoResultError, "No result found"); return NULL; } self->current_row = 0; value = _query_value_in_column(self, 0); if (value) ++self->current_row; return value; } /* Query sequence protocol methods */ static PySequenceMethods query_sequence_methods = { (lenfunc) query_len, /* sq_length */ 0, /* sq_concat */ 0, /* sq_repeat */ (ssizeargfunc) query_getitem, /* sq_item */ 0, /* sq_ass_item */ 0, /* sq_contains */ 0, /* sq_inplace_concat */ 0, /* sq_inplace_repeat */ }; /* Query object methods */ static struct PyMethodDef query_methods[] = { {"getresult", (PyCFunction) query_getresult, METH_NOARGS, query_getresult__doc__}, {"dictresult", (PyCFunction) query_dictresult, METH_NOARGS, query_dictresult__doc__}, {"dictiter", (PyCFunction) query_dictiter, METH_NOARGS, query_dictiter__doc__}, {"namedresult", (PyCFunction) query_namedresult, METH_NOARGS, query_namedresult__doc__}, {"namediter", (PyCFunction) query_namediter, METH_NOARGS, query_namediter__doc__}, {"one", (PyCFunction) query_one, METH_NOARGS, query_one__doc__}, {"single", (PyCFunction) query_single, METH_NOARGS, query_single__doc__}, {"onedict", (PyCFunction) query_onedict, METH_NOARGS, query_onedict__doc__}, {"singledict", (PyCFunction) query_singledict, METH_NOARGS, query_singledict__doc__}, {"onenamed", (PyCFunction) query_onenamed, METH_NOARGS, query_onenamed__doc__}, {"singlenamed", (PyCFunction) query_singlenamed, METH_NOARGS, query_singlenamed__doc__}, {"scalarresult", (PyCFunction) query_scalarresult, METH_NOARGS, query_scalarresult__doc__}, {"scalariter", (PyCFunction) query_scalariter, METH_NOARGS, query_scalariter__doc__}, {"onescalar", (PyCFunction) query_onescalar, METH_NOARGS, query_onescalar__doc__}, {"singlescalar", (PyCFunction) query_singlescalar, METH_NOARGS, query_singlescalar__doc__}, {"fieldname", (PyCFunction) query_fieldname, METH_VARARGS, query_fieldname__doc__}, {"fieldnum", (PyCFunction) query_fieldnum, METH_VARARGS, query_fieldnum__doc__}, {"listfields", (PyCFunction) query_listfields, METH_NOARGS, query_listfields__doc__}, {"ntuples", (PyCFunction) query_ntuples, METH_NOARGS, query_ntuples__doc__}, {NULL, NULL} }; static char query__doc__[] = "PyGreSQL query object"; /* Query type definition */ static PyTypeObject queryType = { PyVarObject_HEAD_INIT(NULL, 0) "pg.Query", /* tp_name */ sizeof(queryObject), /* tp_basicsize */ 0, /* tp_itemsize */ /* methods */ (destructor) query_dealloc, /* tp_dealloc */ 0, /* tp_print */ 0, /* tp_getattr */ 0, /* tp_setattr */ 0, /* tp_compare */ 0, /* tp_repr */ 0, /* tp_as_number */ &query_sequence_methods, /* tp_as_sequence */ 0, /* tp_as_mapping */ 0, /* tp_hash */ 0, /* tp_call */ (reprfunc) query_str, /* tp_str */ PyObject_GenericGetAttr, /* tp_getattro */ 0, /* tp_setattro */ 0, /* tp_as_buffer */ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_ITER, /*
tp_flags */ query__doc__, /* tp_doc */ 0, /* tp_traverse */ 0, /* tp_clear */ 0, /* tp_richcompare */ 0, /* tp_weaklistoffset */ (getiterfunc) query_iter, /* tp_iter */ (iternextfunc) query_next, /* tp_iternext */ query_methods, /* tp_methods */ }; pygresql-5.1.2/pgsource.c000066400000000000000000000567211365010227600153730ustar00rootroot00000000000000/* * PyGreSQL - a Python interface for the PostgreSQL database. * * The source object - this file is part of the C extension module. * * Copyright (c) 2020 by the PyGreSQL Development Team * * Please see the LICENSE.TXT file for specific restrictions. */ /* Deallocate source object. */ static void source_dealloc(sourceObject *self) { if (self->result) PQclear(self->result); Py_XDECREF(self->pgcnx); PyObject_Del(self); } /* Return source object as string in human readable form. */ static PyObject * source_str(sourceObject *self) { switch (self->result_type) { case RESULT_DQL: return format_result(self->result); case RESULT_DDL: case RESULT_DML: return PyStr_FromString(PQcmdStatus(self->result)); case RESULT_EMPTY: default: return PyStr_FromString("(empty PostgreSQL source object)"); } } /* Check source object validity. */ static int _check_source_obj(sourceObject *self, int level) { if (!self->valid) { set_error_msg(OperationalError, "Object has been closed"); return 0; } if ((level & CHECK_RESULT) && !self->result) { set_error_msg(DatabaseError, "No result"); return 0; } if ((level & CHECK_DQL) && self->result_type != RESULT_DQL) { set_error_msg(DatabaseError, "Last query did not return tuples"); return 0; } if ((level & CHECK_CNX) && !_check_cnx_obj(self->pgcnx)) { return 0; } return 1; } /* Get source object attributes. */ static PyObject * source_getattr(sourceObject *self, PyObject *nameobj) { const char *name = PyStr_AsString(nameobj); /* pg connection object */ if (!strcmp(name, "pgcnx")) { if (_check_source_obj(self, 0)) { Py_INCREF(self->pgcnx); return (PyObject *) (self->pgcnx); } Py_INCREF(Py_None); return Py_None; } /* arraysize */ if (!strcmp(name, "arraysize")) return PyInt_FromLong(self->arraysize); /* resulttype */ if (!strcmp(name, "resulttype")) return PyInt_FromLong(self->result_type); /* ntuples */ if (!strcmp(name, "ntuples")) return PyInt_FromLong(self->max_row); /* nfields */ if (!strcmp(name, "nfields")) return PyInt_FromLong(self->num_fields); /* seeks name in methods (fallback) */ return PyObject_GenericGetAttr((PyObject *) self, nameobj); } /* Set source object attributes. */ static int source_setattr(sourceObject *self, char *name, PyObject *v) { /* arraysize */ if (!strcmp(name, "arraysize")) { if (!PyInt_Check(v)) { PyErr_SetString(PyExc_TypeError, "arraysize must be integer"); return -1; } self->arraysize = PyInt_AsLong(v); return 0; } /* unknown attribute */ PyErr_SetString(PyExc_TypeError, "Not a writable attribute"); return -1; } /* Close object. */ static char source_close__doc__[] = "close() -- close source object without deleting it\n\n" "All instances of the source object can no longer be used after this call.\n"; static PyObject * source_close(sourceObject *self, PyObject *noargs) { /* frees result if necessary and invalidates object */ if (self->result) { PQclear(self->result); self->result_type = RESULT_EMPTY; self->result = NULL; } self->valid = 0; /* return None */ Py_INCREF(Py_None); return Py_None; } /* Database query.
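
   Illustrative usage from Python (a sketch; 'con' is an assumed open
   connection). Unlike query(), the source interface returns raw string
   values and leaves type casting to the caller:

       src = con.source()
       src.execute('select generate_series(1, 3)')   # -> None for DQL
       src.fetch(2)   # -> [('1',), ('2',)]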
*/ static char source_execute__doc__[] = "execute(sql) -- execute a SQL statement (string)\n\n" "On success, this call returns the number of affected rows, or None\n" "for DQL (SELECT, ...) statements. The fetch() method can be used\n" "to get result rows.\n"; static PyObject * source_execute(sourceObject *self, PyObject *sql) { PyObject *tmp_obj = NULL; /* auxiliary string object */ char *query; int encoding; /* checks validity */ if (!_check_source_obj(self, CHECK_CNX)) { return NULL; } encoding = PQclientEncoding(self->pgcnx->cnx); if (PyBytes_Check(sql)) { query = PyBytes_AsString(sql); } else if (PyUnicode_Check(sql)) { tmp_obj = get_encoded_string(sql, encoding); if (!tmp_obj) return NULL; /* pass the UnicodeEncodeError */ query = PyBytes_AsString(tmp_obj); } else { PyErr_SetString(PyExc_TypeError, "Method execute() expects a string as argument"); return NULL; } /* frees previous result */ if (self->result) { PQclear(self->result); self->result = NULL; } self->max_row = 0; self->current_row = 0; self->num_fields = 0; self->encoding = encoding; /* gets result */ Py_BEGIN_ALLOW_THREADS self->result = PQexec(self->pgcnx->cnx, query); Py_END_ALLOW_THREADS /* we don't need the auxiliary string any more */ Py_XDECREF(tmp_obj); /* checks result validity */ if (!self->result) { PyErr_SetString(PyExc_ValueError, PQerrorMessage(self->pgcnx->cnx)); return NULL; } /* this may have changed the datestyle, so we reset the date format in order to force fetching it newly when next time requested */ self->pgcnx->date_format = date_format; /* this is normally NULL */ /* checks result status */ switch (PQresultStatus(self->result)) { /* query succeeded */ case PGRES_TUPLES_OK: /* DQL: returns None (DB-SIG compliant) */ self->result_type = RESULT_DQL; self->max_row = PQntuples(self->result); self->num_fields = PQnfields(self->result); Py_INCREF(Py_None); return Py_None; case PGRES_COMMAND_OK: /* other requests */ case PGRES_COPY_OUT: case PGRES_COPY_IN: { long num_rows; char *tmp; tmp = PQcmdTuples(self->result); if (tmp[0]) { self->result_type = RESULT_DML; num_rows = atol(tmp); } else { self->result_type = RESULT_DDL; num_rows = -1; } return PyInt_FromLong(num_rows); } /* query failed */ case PGRES_EMPTY_QUERY: PyErr_SetString(PyExc_ValueError, "Empty query"); break; case PGRES_BAD_RESPONSE: case PGRES_FATAL_ERROR: case PGRES_NONFATAL_ERROR: set_error(ProgrammingError, "Cannot execute command", self->pgcnx->cnx, self->result); break; default: set_error_msg(InternalError, "Internal error: unknown result status"); } /* frees result and returns error */ PQclear(self->result); self->result = NULL; self->result_type = RESULT_EMPTY; return NULL; } /* Get oid status for last query (valid for INSERTs, None for other). */ static char source_oidstatus__doc__[] = "oidstatus() -- return oid of last inserted row (if available)"; static PyObject * source_oidstatus(sourceObject *self, PyObject *noargs) { Oid oid; /* checks validity */ if (!_check_source_obj(self, CHECK_RESULT)) { return NULL; } /* retrieves oid status */ if ((oid = PQoidValue(self->result)) == InvalidOid) { Py_INCREF(Py_None); return Py_None; } return PyInt_FromLong(oid); } /* Fetch rows from last result.
*/ static char source_fetch__doc__[] = "fetch(num) -- return the next num rows from the last result in a list\n\n" "If the num parameter is omitted, the arraysize attribute value is used.\n" "If num equals -1, all rows are fetched.\n"; static PyObject * source_fetch(sourceObject *self, PyObject *args) { PyObject *res_list; int i, k; long size; #if IS_PY3 int encoding; #endif /* checks validity */ if (!_check_source_obj(self, CHECK_RESULT | CHECK_DQL | CHECK_CNX)) { return NULL; } /* checks args */ size = self->arraysize; if (!PyArg_ParseTuple(args, "|l", &size)) { PyErr_SetString(PyExc_TypeError, "fetch(num), with num (integer, optional)"); return NULL; } /* seeks last line */ /* limit size to be within the amount of data we actually have */ if (size == -1 || (self->max_row - self->current_row) < size) { size = self->max_row - self->current_row; } /* allocate list for result */ if (!(res_list = PyList_New(0))) return NULL; #if IS_PY3 encoding = self->encoding; #endif /* builds result */ for (i = 0, k = self->current_row; i < size; ++i, ++k) { PyObject *rowtuple; int j; if (!(rowtuple = PyTuple_New(self->num_fields))) { Py_DECREF(res_list); return NULL; } for (j = 0; j < self->num_fields; ++j) { PyObject *str; if (PQgetisnull(self->result, k, j)) { Py_INCREF(Py_None); str = Py_None; } else { char *s = PQgetvalue(self->result, k, j); Py_ssize_t size = PQgetlength(self->result, k, j); #if IS_PY3 if (PQfformat(self->result, j) == 0) { /* textual format */ str = get_decoded_string(s, size, encoding); if (!str) /* cannot decode */ str = PyBytes_FromStringAndSize(s, size); } else #endif str = PyBytes_FromStringAndSize(s, size); } PyTuple_SET_ITEM(rowtuple, j, str); } if (PyList_Append(res_list, rowtuple)) { Py_DECREF(rowtuple); Py_DECREF(res_list); return NULL; } Py_DECREF(rowtuple); } self->current_row = k; return res_list; } /* Change current row (internal wrapper for all "move" methods). */ static PyObject * _source_move(sourceObject *self, int move) { /* checks validity */ if (!_check_source_obj(self, CHECK_RESULT | CHECK_DQL)) { return NULL; } /* changes the current row */ switch (move) { case QUERY_MOVEFIRST: self->current_row = 0; break; case QUERY_MOVELAST: self->current_row = self->max_row - 1; break; case QUERY_MOVENEXT: if (self->current_row != self->max_row) ++self->current_row; break; case QUERY_MOVEPREV: if (self->current_row > 0) self->current_row--; break; } Py_INCREF(Py_None); return Py_None; } /* Move to first result row. */ static char source_movefirst__doc__[] = "movefirst() -- move to first result row"; static PyObject * source_movefirst(sourceObject *self, PyObject *noargs) { return _source_move(self, QUERY_MOVEFIRST); } /* Move to last result row. */ static char source_movelast__doc__[] = "movelast() -- move to last valid result row"; static PyObject * source_movelast(sourceObject *self, PyObject *noargs) { return _source_move(self, QUERY_MOVELAST); } /* Move to next result row. */ static char source_movenext__doc__[] = "movenext() -- move to next result row"; static PyObject * source_movenext(sourceObject *self, PyObject *noargs) { return _source_move(self, QUERY_MOVENEXT); } /* Move to previous result row. */ static char source_moveprev__doc__[] = "moveprev() -- move to previous result row"; static PyObject * source_moveprev(sourceObject *self, PyObject *noargs) { return _source_move(self, QUERY_MOVEPREV); } /* Put copy data.
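
   Illustrative usage from Python (a sketch; assumes a table t with a
   single integer column exists):

       src = con.source()
       src.execute('copy t from stdin')
       src.putdata('1\n2\n')
       src.putdata(None)   # -> 2, ends the copy and returns the row count

   Passing an exception instead of None aborts the copy with that error
   message on the server side.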
*/ static char source_putdata__doc__[] = "putdata(buffer) -- send data to server during copy from stdin"; static PyObject * source_putdata(sourceObject *self, PyObject *buffer) { PyObject *tmp_obj = NULL; /* an auxiliary object */ char *buf; /* the buffer as encoded string */ Py_ssize_t nbytes; /* length of string */ char *errormsg = NULL; /* error message */ int res; /* direct result of the operation */ PyObject *ret; /* return value */ /* checks validity */ if (!_check_source_obj(self, CHECK_CNX)) { return NULL; } /* make sure that the connection object is valid */ if (!self->pgcnx->cnx) { return NULL; } if (buffer == Py_None) { /* pass None for terminating the operation */ buf = errormsg = NULL; } else if (PyBytes_Check(buffer)) { /* or pass a byte string */ PyBytes_AsStringAndSize(buffer, &buf, &nbytes); } else if (PyUnicode_Check(buffer)) { /* or pass a unicode string */ tmp_obj = get_encoded_string( buffer, PQclientEncoding(self->pgcnx->cnx)); if (!tmp_obj) return NULL; /* pass the UnicodeEncodeError */ PyBytes_AsStringAndSize(tmp_obj, &buf, &nbytes); } else if (PyErr_GivenExceptionMatches(buffer, PyExc_BaseException)) { /* or pass a Python exception for sending an error message */ tmp_obj = PyObject_Str(buffer); if (PyUnicode_Check(tmp_obj)) { PyObject *obj = tmp_obj; tmp_obj = get_encoded_string( obj, PQclientEncoding(self->pgcnx->cnx)); Py_DECREF(obj); if (!tmp_obj) return NULL; /* pass the UnicodeEncodeError */ } errormsg = PyBytes_AsString(tmp_obj); buf = NULL; } else { PyErr_SetString(PyExc_TypeError, "Method putdata() expects a buffer, None" " or an exception as argument"); return NULL; } /* checks validity */ if (!_check_source_obj(self, CHECK_CNX | CHECK_RESULT) || PQresultStatus(self->result) != PGRES_COPY_IN) { PyErr_SetString(PyExc_IOError, "Connection is invalid or not in copy_in state"); Py_XDECREF(tmp_obj); return NULL; } if (buf) { res = nbytes ? PQputCopyData(self->pgcnx->cnx, buf, (int) nbytes) : 1; } else { res = PQputCopyEnd(self->pgcnx->cnx, errormsg); } Py_XDECREF(tmp_obj); if (res != 1) { PyErr_SetString(PyExc_IOError, PQerrorMessage(self->pgcnx->cnx)); return NULL; } if (buf) { /* buffer has been sent */ ret = Py_None; Py_INCREF(ret); } else { /* copy is done */ PGresult *result; /* final result of the operation */ Py_BEGIN_ALLOW_THREADS; result = PQgetResult(self->pgcnx->cnx); Py_END_ALLOW_THREADS; if (PQresultStatus(result) == PGRES_COMMAND_OK) { char *tmp; long num_rows; tmp = PQcmdTuples(result); num_rows = tmp[0] ? atol(tmp) : -1; ret = PyInt_FromLong(num_rows); } else { if (!errormsg) errormsg = PQerrorMessage(self->pgcnx->cnx); PyErr_SetString(PyExc_IOError, errormsg); ret = NULL; } PQclear(self->result); self->result = NULL; self->result_type = RESULT_EMPTY; } return ret; /* None or number of rows */ } /* Get copy data. 
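
   Illustrative usage from Python (a sketch; table t as above):

       src = con.source()
       src.execute('copy t to stdout')
       src.getdata(1)   # -> '1\n' as str; without the flag, bytes
       src.getdata(1)   # -> '2\n'
       src.getdata(1)   # -> 2, the row count once the copy is done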
*/

static char source_getdata__doc__[] =
"getdata(decode) -- receive data from server during copy to stdout";

static PyObject *
source_getdata(sourceObject *self, PyObject *args)
{
    int decode = 0;    /* decode flag */
    char *buffer;      /* the copied buffer as encoded byte string */
    Py_ssize_t nbytes; /* length of the byte string */
    PyObject *ret;     /* return value */

    /* checks validity */
    if (!_check_source_obj(self, CHECK_CNX)) {
        return NULL;
    }

    /* make sure that the connection object is valid */
    if (!self->pgcnx->cnx) {
        return NULL;
    }

    if (!PyArg_ParseTuple(args, "|i", &decode)) {
        return NULL;
    }

    /* checks validity */
    if (!_check_source_obj(self, CHECK_CNX | CHECK_RESULT) ||
        PQresultStatus(self->result) != PGRES_COPY_OUT)
    {
        PyErr_SetString(PyExc_IOError,
                        "Connection is invalid or not in copy_out state");
        return NULL;
    }

    nbytes = PQgetCopyData(self->pgcnx->cnx, &buffer, 0);

    if (!nbytes || nbytes < -1) { /* an error occurred */
        PyErr_SetString(PyExc_IOError, PQerrorMessage(self->pgcnx->cnx));
        return NULL;
    }

    if (nbytes == -1) { /* copy is done */
        PGresult *result; /* final result of the operation */

        Py_BEGIN_ALLOW_THREADS;
        result = PQgetResult(self->pgcnx->cnx);
        Py_END_ALLOW_THREADS;

        if (PQresultStatus(result) == PGRES_COMMAND_OK) {
            char *tmp;
            long num_rows;

            tmp = PQcmdTuples(result);
            num_rows = tmp[0] ? atol(tmp) : -1;
            ret = PyInt_FromLong(num_rows);
        }
        else {
            PyErr_SetString(PyExc_IOError, PQerrorMessage(self->pgcnx->cnx));
            ret = NULL;
        }

        PQclear(self->result);
        self->result = NULL;
        self->result_type = RESULT_EMPTY;
    }
    else { /* a row has been returned */
        ret = decode ? get_decoded_string(
                buffer, nbytes, PQclientEncoding(self->pgcnx->cnx)) :
            PyBytes_FromStringAndSize(buffer, nbytes);
        PQfreemem(buffer);
    }

    return ret; /* buffer or number of rows */
}

/* Find field number from string/integer (internal use only). */
static int
_source_fieldindex(sourceObject *self, PyObject *param, const char *usage)
{
    int num;

    /* checks validity */
    if (!_check_source_obj(self, CHECK_RESULT | CHECK_DQL))
        return -1;

    /* gets field number */
    if (PyStr_Check(param)) {
        num = PQfnumber(self->result, PyStr_AsString(param));
    }
    else if (PyInt_Check(param)) {
        num = (int) PyInt_AsLong(param);
    }
    else {
        PyErr_SetString(PyExc_TypeError, usage);
        return -1;
    }

    /* checks field validity */
    if (num < 0 || num >= self->num_fields) {
        PyErr_SetString(PyExc_ValueError, "Unknown field");
        return -1;
    }

    return num;
}

/* Build field information from position (internal use only). */
static PyObject *
_source_buildinfo(sourceObject *self, int num)
{
    PyObject *result;

    /* allocates tuple */
    result = PyTuple_New(5);
    if (!result) {
        return NULL;
    }

    /* sets field information */
    PyTuple_SET_ITEM(result, 0, PyInt_FromLong(num));
    PyTuple_SET_ITEM(result, 1,
        PyStr_FromString(PQfname(self->result, num)));
    PyTuple_SET_ITEM(result, 2, PyInt_FromLong(PQftype(self->result, num)));
    PyTuple_SET_ITEM(result, 3, PyInt_FromLong(PQfsize(self->result, num)));
    PyTuple_SET_ITEM(result, 4, PyInt_FromLong(PQfmod(self->result, num)));

    return result;
}

/* List fields info.
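   The 5-tuples built above are (position, name, type oid, size, modifier),
   which is also the layout returned by listinfo() and fieldinfo().

   A minimal Python usage sketch (the source object `src` is assumed to
   come from con.source() as in the copy examples above):

       src.execute("select 123 as num")
       for info in src.listinfo():
           print(info)                # e.g. (0, 'num', 23, 4, -1)
       print(src.field('num'))        # value of the current row: '123'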
*/ static char source_listinfo__doc__[] = "listinfo() -- get information for all fields (position, name, type oid)"; static PyObject * source_listInfo(sourceObject *self, PyObject *noargs) { PyObject *result, *info; int i; /* checks validity */ if (!_check_source_obj(self, CHECK_RESULT | CHECK_DQL)) { return NULL; } /* builds result */ if (!(result = PyTuple_New(self->num_fields))) { return NULL; } for (i = 0; i < self->num_fields; ++i) { info = _source_buildinfo(self, i); if (!info) { Py_DECREF(result); return NULL; } PyTuple_SET_ITEM(result, i, info); } /* returns result */ return result; } /* List fields information for last result. */ static char source_fieldinfo__doc__[] = "fieldinfo(desc) -- get specified field info (position, name, type oid)"; static PyObject * source_fieldinfo(sourceObject *self, PyObject *desc) { int num; /* checks args and validity */ if ((num = _source_fieldindex( self, desc, "Method fieldinfo() needs a string or integer as argument")) == -1) { return NULL; } /* returns result */ return _source_buildinfo(self, num); } /* Retrieve field value. */ static char source_field__doc__[] = "field(desc) -- return specified field value"; static PyObject * source_field(sourceObject *self, PyObject *desc) { int num; /* checks args and validity */ if ((num = _source_fieldindex( self, desc, "Method field() needs a string or integer as argument")) == -1) { return NULL; } return PyStr_FromString( PQgetvalue(self->result, self->current_row, num)); } /* Get the list of source object attributes. */ static PyObject * source_dir(connObject *self, PyObject *noargs) { PyObject *attrs; attrs = PyObject_Dir(PyObject_Type((PyObject *) self)); PyObject_CallMethod( attrs, "extend", "[sssss]", "pgcnx", "arraysize", "resulttype", "ntuples", "nfields"); return attrs; } /* Source object methods */ static PyMethodDef source_methods[] = { {"__dir__", (PyCFunction) source_dir, METH_NOARGS, NULL}, {"close", (PyCFunction) source_close, METH_NOARGS, source_close__doc__}, {"execute", (PyCFunction) source_execute, METH_O, source_execute__doc__}, {"oidstatus", (PyCFunction) source_oidstatus, METH_NOARGS, source_oidstatus__doc__}, {"fetch", (PyCFunction) source_fetch, METH_VARARGS, source_fetch__doc__}, {"movefirst", (PyCFunction) source_movefirst, METH_NOARGS, source_movefirst__doc__}, {"movelast", (PyCFunction) source_movelast, METH_NOARGS, source_movelast__doc__}, {"movenext", (PyCFunction) source_movenext, METH_NOARGS, source_movenext__doc__}, {"moveprev", (PyCFunction) source_moveprev, METH_NOARGS, source_moveprev__doc__}, {"putdata", (PyCFunction) source_putdata, METH_O, source_putdata__doc__}, {"getdata", (PyCFunction) source_getdata, METH_VARARGS, source_getdata__doc__}, {"field", (PyCFunction) source_field, METH_O, source_field__doc__}, {"fieldinfo", (PyCFunction) source_fieldinfo, METH_O, source_fieldinfo__doc__}, {"listinfo", (PyCFunction) source_listInfo, METH_NOARGS, source_listinfo__doc__}, {NULL, NULL} }; static char source__doc__[] = "PyGreSQL source object"; /* Source type definition */ static PyTypeObject sourceType = { PyVarObject_HEAD_INIT(NULL, 0) "pgdb.Source", /* tp_name */ sizeof(sourceObject), /* tp_basicsize */ 0, /* tp_itemsize */ /* methods */ (destructor) source_dealloc, /* tp_dealloc */ 0, /* tp_print */ 0, /* tp_getattr */ (setattrfunc) source_setattr, /* tp_setattr */ 0, /* tp_compare */ 0, /* tp_repr */ 0, /* tp_as_number */ 0, /* tp_as_sequence */ 0, /* tp_as_mapping */ 0, /* tp_hash */ 0, /* tp_call */ (reprfunc) source_str, /* tp_str */ (getattrofunc) source_getattr, 
/* tp_getattro */ 0, /* tp_setattro */ 0, /* tp_as_buffer */ Py_TPFLAGS_DEFAULT, /* tp_flags */ source__doc__, /* tp_doc */ 0, /* tp_traverse */ 0, /* tp_clear */ 0, /* tp_richcompare */ 0, /* tp_weaklistoffset */ 0, /* tp_iter */ 0, /* tp_iternext */ source_methods, /* tp_methods */ }; pygresql-5.1.2/pgtypes.h000066400000000000000000000107161365010227600152400ustar00rootroot00000000000000/* pgtypes - PostgreSQL type definitions These are the standard PostgreSQL 11.1 built-in types, extracted from src/backend/catalog/pg_type_d.h, because that header file is sometimes not available or needs other header files to get properly included. You can also query pg_type to get this information. */ #ifndef PG_TYPE_D_H #define BOOLOID 16 #define BYTEAOID 17 #define CHAROID 18 #define NAMEOID 19 #define INT8OID 20 #define INT2OID 21 #define INT2VECTOROID 22 #define INT4OID 23 #define REGPROCOID 24 #define TEXTOID 25 #define OIDOID 26 #define TIDOID 27 #define XIDOID 28 #define CIDOID 29 #define OIDVECTOROID 30 #define JSONOID 114 #define XMLOID 142 #define XMLARRAYOID 143 #define JSONARRAYOID 199 #define PGNODETREEOID 194 #define PGNDISTINCTOID 3361 #define PGDEPENDENCIESOID 3402 #define PGDDLCOMMANDOID 32 #define SMGROID 210 #define POINTOID 600 #define LSEGOID 601 #define PATHOID 602 #define BOXOID 603 #define POLYGONOID 604 #define LINEOID 628 #define LINEARRAYOID 629 #define FLOAT4OID 700 #define FLOAT8OID 701 #define ABSTIMEOID 702 #define RELTIMEOID 703 #define TINTERVALOID 704 #define UNKNOWNOID 705 #define CIRCLEOID 718 #define CIRCLEARRAYOID 719 #define CASHOID 790 #define MONEYARRAYOID 791 #define MACADDROID 829 #define INETOID 869 #define CIDROID 650 #define MACADDR8OID 774 #define BOOLARRAYOID 1000 #define BYTEAARRAYOID 1001 #define CHARARRAYOID 1002 #define NAMEARRAYOID 1003 #define INT2ARRAYOID 1005 #define INT2VECTORARRAYOID 1006 #define INT4ARRAYOID 1007 #define REGPROCARRAYOID 1008 #define TEXTARRAYOID 1009 #define OIDARRAYOID 1028 #define TIDARRAYOID 1010 #define XIDARRAYOID 1011 #define CIDARRAYOID 1012 #define OIDVECTORARRAYOID 1013 #define BPCHARARRAYOID 1014 #define VARCHARARRAYOID 1015 #define INT8ARRAYOID 1016 #define POINTARRAYOID 1017 #define LSEGARRAYOID 1018 #define PATHARRAYOID 1019 #define BOXARRAYOID 1020 #define FLOAT4ARRAYOID 1021 #define FLOAT8ARRAYOID 1022 #define ABSTIMEARRAYOID 1023 #define RELTIMEARRAYOID 1024 #define TINTERVALARRAYOID 1025 #define POLYGONARRAYOID 1027 #define ACLITEMOID 1033 #define ACLITEMARRAYOID 1034 #define MACADDRARRAYOID 1040 #define MACADDR8ARRAYOID 775 #define INETARRAYOID 1041 #define CIDRARRAYOID 651 #define CSTRINGARRAYOID 1263 #define BPCHAROID 1042 #define VARCHAROID 1043 #define DATEOID 1082 #define TIMEOID 1083 #define TIMESTAMPOID 1114 #define TIMESTAMPARRAYOID 1115 #define DATEARRAYOID 1182 #define TIMEARRAYOID 1183 #define TIMESTAMPTZOID 1184 #define TIMESTAMPTZARRAYOID 1185 #define INTERVALOID 1186 #define INTERVALARRAYOID 1187 #define NUMERICARRAYOID 1231 #define TIMETZOID 1266 #define TIMETZARRAYOID 1270 #define BITOID 1560 #define BITARRAYOID 1561 #define VARBITOID 1562 #define VARBITARRAYOID 1563 #define NUMERICOID 1700 #define REFCURSOROID 1790 #define REFCURSORARRAYOID 2201 #define REGPROCEDUREOID 2202 #define REGOPEROID 2203 #define REGOPERATOROID 2204 #define REGCLASSOID 2205 #define REGTYPEOID 2206 #define REGROLEOID 4096 #define REGNAMESPACEOID 4089 #define REGPROCEDUREARRAYOID 2207 #define REGOPERARRAYOID 2208 #define REGOPERATORARRAYOID 2209 #define REGCLASSARRAYOID 2210 #define REGTYPEARRAYOID 2211 #define 
REGROLEARRAYOID 4097 #define REGNAMESPACEARRAYOID 4090 #define UUIDOID 2950 #define UUIDARRAYOID 2951 #define LSNOID 3220 #define PG_LSNARRAYOID 3221 #define TSVECTOROID 3614 #define GTSVECTOROID 3642 #define TSQUERYOID 3615 #define REGCONFIGOID 3734 #define REGDICTIONARYOID 3769 #define TSVECTORARRAYOID 3643 #define GTSVECTORARRAYOID 3644 #define TSQUERYARRAYOID 3645 #define REGCONFIGARRAYOID 3735 #define REGDICTIONARYARRAYOID 3770 #define JSONBOID 3802 #define JSONBARRAYOID 3807 #define TXID_SNAPSHOTOID 2970 #define TXID_SNAPSHOTARRAYOID 2949 #define INT4RANGEOID 3904 #define INT4RANGEARRAYOID 3905 #define NUMRANGEOID 3906 #define NUMRANGEARRAYOID 3907 #define TSRANGEOID 3908 #define TSRANGEARRAYOID 3909 #define TSTZRANGEOID 3910 #define TSTZRANGEARRAYOID 3911 #define DATERANGEOID 3912 #define DATERANGEARRAYOID 3913 #define INT8RANGEOID 3926 #define INT8RANGEARRAYOID 3927 #define RECORDOID 2249 #define RECORDARRAYOID 2287 #define CSTRINGOID 2275 #define ANYOID 2276 #define ANYARRAYOID 2277 #define VOIDOID 2278 #define TRIGGEROID 2279 #define EVTTRIGGEROID 3838 #define LANGUAGE_HANDLEROID 2280 #define INTERNALOID 2281 #define OPAQUEOID 2282 #define ANYELEMENTOID 2283 #define ANYNONARRAYOID 2776 #define ANYENUMOID 3500 #define FDW_HANDLEROID 3115 #define INDEX_AM_HANDLEROID 325 #define TSM_HANDLEROID 3310 #define ANYRANGEOID 3831 #endif /* PG_TYPE_D_H */ pygresql-5.1.2/py3c.h000066400000000000000000000077461365010227600144340ustar00rootroot00000000000000/* Copyright (c) 2015, Red Hat, Inc. and/or its affiliates * Licensed under the MIT license; see py3c.h */ #ifndef _PY3C_COMPAT_H_ #define _PY3C_COMPAT_H_ #define PY_SSIZE_T_CLEAN #include #if PY_MAJOR_VERSION >= 3 /***** Python 3 *****/ #define IS_PY3 1 /* Strings */ #define PyStr_Type PyUnicode_Type #define PyStr_Check PyUnicode_Check #define PyStr_CheckExact PyUnicode_CheckExact #define PyStr_FromString PyUnicode_FromString #define PyStr_FromStringAndSize PyUnicode_FromStringAndSize #define PyStr_FromFormat PyUnicode_FromFormat #define PyStr_FromFormatV PyUnicode_FromFormatV #define PyStr_AsString PyUnicode_AsUTF8 #define PyStr_Concat PyUnicode_Concat #define PyStr_Format PyUnicode_Format #define PyStr_InternInPlace PyUnicode_InternInPlace #define PyStr_InternFromString PyUnicode_InternFromString #define PyStr_Decode PyUnicode_Decode #define PyStr_AsUTF8String PyUnicode_AsUTF8String // returns PyBytes #define PyStr_AsUTF8 PyUnicode_AsUTF8 #define PyStr_AsUTF8AndSize PyUnicode_AsUTF8AndSize /* Ints */ #define PyInt_Type PyLong_Type #define PyInt_Check PyLong_Check #define PyInt_CheckExact PyLong_CheckExact #define PyInt_FromString PyLong_FromString #define PyInt_FromLong PyLong_FromLong #define PyInt_FromSsize_t PyLong_FromSsize_t #define PyInt_FromSize_t PyLong_FromSize_t #define PyInt_AsLong PyLong_AsLong #define PyInt_AS_LONG PyLong_AS_LONG #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask #define PyInt_AsSsize_t PyLong_AsSsize_t /* Module init */ #define MODULE_INIT_FUNC(name) \ PyMODINIT_FUNC PyInit_ ## name(void); \ PyMODINIT_FUNC PyInit_ ## name(void) /* Other */ #define Py_TPFLAGS_HAVE_ITER 0 // not needed in Python 3 #else /***** Python 2 *****/ #define IS_PY3 0 /* Strings */ #define PyStr_Type PyString_Type #define PyStr_Check PyString_Check #define PyStr_CheckExact PyString_CheckExact #define PyStr_FromString PyString_FromString #define PyStr_FromStringAndSize PyString_FromStringAndSize #define PyStr_FromFormat PyString_FromFormat #define PyStr_FromFormatV PyString_FromFormatV #define PyStr_AsString 
PyString_AsString #define PyStr_Format PyString_Format #define PyStr_InternInPlace PyString_InternInPlace #define PyStr_InternFromString PyString_InternFromString #define PyStr_Decode PyString_Decode static inline PyObject *PyStr_Concat(PyObject *left, PyObject *right) { PyObject *str = left; Py_INCREF(left); // reference to old left will be stolen PyString_Concat(&str, right); if (str) { return str; } else { return NULL; } } #define PyStr_AsUTF8String(str) (Py_INCREF(str), (str)) #define PyStr_AsUTF8 PyString_AsString #define PyStr_AsUTF8AndSize(pystr, sizeptr) \ ((*sizeptr=PyString_Size(pystr)), PyString_AsString(pystr)) #define PyBytes_Type PyString_Type #define PyBytes_Check PyString_Check #define PyBytes_CheckExact PyString_CheckExact #define PyBytes_FromString PyString_FromString #define PyBytes_FromStringAndSize PyString_FromStringAndSize #define PyBytes_FromFormat PyString_FromFormat #define PyBytes_FromFormatV PyString_FromFormatV #define PyBytes_Size PyString_Size #define PyBytes_GET_SIZE PyString_GET_SIZE #define PyBytes_AsString PyString_AsString #define PyBytes_AS_STRING PyString_AS_STRING #define PyBytes_AsStringAndSize PyString_AsStringAndSize #define PyBytes_Concat PyString_Concat #define PyBytes_ConcatAndDel PyString_ConcatAndDel #define _PyBytes_Resize _PyString_Resize /* Floats */ #define PyFloat_FromString(str) PyFloat_FromString(str, NULL) /* Module init */ #define PyModuleDef_HEAD_INIT 0 typedef struct PyModuleDef { int m_base; const char* m_name; const char* m_doc; Py_ssize_t m_size; PyMethodDef *m_methods; } PyModuleDef; #define PyModule_Create(def) \ Py_InitModule3((def)->m_name, (def)->m_methods, (def)->m_doc) #define MODULE_INIT_FUNC(name) \ static PyObject *PyInit_ ## name(void); \ void init ## name(void); \ void init ## name(void) { PyInit_ ## name(); } \ static PyObject *PyInit_ ## name(void) #endif #endif pygresql-5.1.2/setup.cfg000066400000000000000000000000461365010227600152100ustar00rootroot00000000000000[egg_info] tag_build = tag_date = 0 pygresql-5.1.2/setup.py000077500000000000000000000235731365010227600151160ustar00rootroot00000000000000#!/usr/bin/python # # PyGreSQL - a Python interface for the PostgreSQL database. # # Copyright (c) 2020 by the PyGreSQL Development Team # # Please see the LICENSE.TXT file for specific restrictions. """Setup script for PyGreSQL version 5.1.2 PyGreSQL is an open-source Python module that interfaces to a PostgreSQL database. It embeds the PostgreSQL query library to allow easy use of the powerful PostgreSQL features from a Python script. Authors and history: * PyGreSQL written 1997 by D'Arcy J.M. Cain * based on code written 1995 by Pascal Andre * setup script created 2000 by Mark Alexander * improved 2000 by Jeremy Hylton * improved 2001 by Gerhard Haering * improved 2006 to 2018 by Christoph Zwerschke Prerequisites to be installed: * Python including devel package (header files and distutils) * PostgreSQL libs and devel packages (header file of the libpq client) * PostgreSQL pg_config tool (usually included in the devel package) (the Windows installer has it as part of the database server feature) PyGreSQL currently supports Python versions 2.6, 2.7 and 3.3 to 3.8, and PostgreSQL versions 9.0 to 9.6 and 10 to 12. Use as follows: python setup.py build_ext # to build the module python setup.py install # to install it See docs.python.org/doc/install/ for more information on using distutils to install Python programs. 
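The build_ext command also accepts PyGreSQL specific options, e.g.:

python setup.py build_ext --strict           # compiler warnings are errors
python setup.py build_ext --no-direct-access # disable direct access funcs

(See the build_pg_ext class below for the full list of these options.)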
""" import os import platform import re import sys import warnings try: from setuptools import setup except ImportError: from distutils.core import setup from distutils.extension import Extension from distutils.command.build_ext import build_ext from distutils.ccompiler import get_default_compiler from distutils.sysconfig import get_python_inc, get_python_lib version = '5.1.2' if (not (2, 6) <= sys.version_info[:2] < (3, 0) and not (3, 3) <= sys.version_info[:2] < (4, 0)): raise Exception( "Sorry, PyGreSQL %s does not support this Python version" % version) # For historical reasons, PyGreSQL does not install itself as a single # "pygresql" package, but as two top-level modules "pg", providing the # classic interface, and "pgdb" for the modern DB-API 2.0 interface. # These two top-level Python modules share the same C extension "_pg". py_modules = ['pg', 'pgdb'] c_sources = ['pgmodule.c'] def pg_config(s): """Retrieve information about installed version of PostgreSQL.""" f = os.popen('pg_config --%s' % s) d = f.readline().strip() if f.close() is not None: raise Exception("pg_config tool is not available.") if not d: raise Exception("Could not get %s information." % s) return d def pg_version(): """Return the PostgreSQL version as a tuple of integers.""" match = re.search(r'(\d+)\.(\d+)', pg_config('version')) if match: return tuple(map(int, match.groups())) return 9, 0 pg_version = pg_version() libraries = ['pq'] # Make sure that the Python header files are searched before # those of PostgreSQL, because PostgreSQL can have its own Python.h include_dirs = [get_python_inc(), pg_config('includedir')] library_dirs = [get_python_lib(), pg_config('libdir')] define_macros = [('PYGRESQL_VERSION', version)] undef_macros = [] extra_compile_args = ['-O2', '-funsigned-char', '-Wall', '-Wconversion'] class build_pg_ext(build_ext): """Customized build_ext command for PyGreSQL.""" description = "build the PyGreSQL C extension" user_options = build_ext.user_options + [ ('strict', None, "count all compiler warnings as errors"), ('direct-access', None, "enable direct access functions"), ('no-direct-access', None, "disable direct access functions"), ('direct-access', None, "enable direct access functions"), ('no-direct-access', None, "disable direct access functions"), ('large-objects', None, "enable large object support"), ('no-large-objects', None, "disable large object support"), ('default-vars', None, "enable default variables use"), ('no-default-vars', None, "disable default variables use"), ('escaping-funcs', None, "enable string escaping functions"), ('no-escaping-funcs', None, "disable string escaping functions"), ('ssl-info', None, "use new ssl info functions"), ('no-ssl-info', None, "do not use new ssl info functions")] boolean_options = build_ext.boolean_options + [ 'strict', 'direct-access', 'large-objects', 'default-vars', 'escaping-funcs', 'ssl-info'] negative_opt = { 'no-direct-access': 'direct-access', 'no-large-objects': 'large-objects', 'no-default-vars': 'default-vars', 'no-escaping-funcs': 'escaping-funcs', 'no-ssl-info': 'ssl-info'} def get_compiler(self): """Return the C compiler used for building the extension.""" return self.compiler or get_default_compiler() def initialize_options(self): build_ext.initialize_options(self) self.strict = False self.direct_access = None self.large_objects = None self.default_vars = None self.escaping_funcs = None self.ssl_info = None if pg_version < (9, 0): warnings.warn( "PyGreSQL does not support the installed PostgreSQL version.") def 
finalize_options(self): """Set final values for all build_pg options.""" build_ext.finalize_options(self) if self.strict: extra_compile_args.append('-Werror') if self.direct_access is None or self.direct_access: define_macros.append(('DIRECT_ACCESS', None)) if self.large_objects is None or self.large_objects: define_macros.append(('LARGE_OBJECTS', None)) if self.default_vars is None or self.default_vars: define_macros.append(('DEFAULT_VARS', None)) if self.escaping_funcs is None or self.escaping_funcs: if pg_version >= (9, 0): define_macros.append(('ESCAPING_FUNCS', None)) else: (warnings.warn if self.escaping_funcs is None else sys.exit)( "The installed PostgreSQL version" " does not support the newer string escaping functions.") if self.ssl_info is None or self.ssl_info: if pg_version >= (9, 5): define_macros.append(('SSL_INFO', None)) else: (warnings.warn if self.ssl_info is None else sys.exit)( "The installed PostgreSQL version" " does not support ssl info functions.") if sys.platform == 'win32': bits = platform.architecture()[0] if bits == '64bit': # we need to find libpq64 for path in os.environ['PATH'].split(os.pathsep) + [ r'C:\Program Files\PostgreSQL\libpq64']: library_dir = os.path.join(path, 'lib') if not os.path.isdir(library_dir): continue lib = os.path.join(library_dir, 'libpqdll.') if not (os.path.exists(lib + 'lib') or os.path.exists(lib + 'a')): continue include_dir = os.path.join(path, 'include') if not os.path.isdir(include_dir): continue if library_dir not in library_dirs: library_dirs.insert(1, library_dir) if include_dir not in include_dirs: include_dirs.insert(1, include_dir) libraries[0] += 'dll' # libpqdll instead of libpq break compiler = self.get_compiler() if compiler == 'mingw32': # MinGW if bits == '64bit': # needs MinGW-w64 define_macros.append(('MS_WIN64', None)) elif compiler == 'msvc': # Microsoft Visual C++ libraries[0] = 'lib' + libraries[0] extra_compile_args[1:] = [ '-J', '-W3', '-WX', '-Dinline=__inline'] # needed for MSVC 9 setup( name="PyGreSQL", version=version, description="Python PostgreSQL Interfaces", long_description=__doc__.split('\n\n', 2)[1], # first passage long_description_content_type = 'text/plain', keywords="pygresql postgresql database api dbapi", author="D'Arcy J. M. 
Cain", author_email="darcy@PyGreSQL.org", url="http://www.pygresql.org", download_url="http://www.pygresql.org/download/", platforms=["any"], license="PostgreSQL", py_modules=py_modules, ext_modules=[Extension( '_pg', c_sources, include_dirs=include_dirs, library_dirs=library_dirs, define_macros=define_macros, undef_macros=undef_macros, libraries=libraries, extra_compile_args=extra_compile_args)], zip_safe=False, cmdclass=dict(build_ext=build_pg_ext), test_suite='tests.discover', classifiers=[ "Development Status :: 6 - Mature", "Intended Audience :: Developers", "License :: OSI Approved :: PostgreSQL License", "Operating System :: OS Independent", "Programming Language :: C", 'Programming Language :: Python', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.6', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', "Programming Language :: SQL", "Topic :: Database", "Topic :: Database :: Front-Ends", "Topic :: Software Development :: Libraries :: Python Modules"] ) pygresql-5.1.2/tests/000077500000000000000000000000001365010227600145315ustar00rootroot00000000000000pygresql-5.1.2/tests/__init__.py000066400000000000000000000010551365010227600166430ustar00rootroot00000000000000"""PyGreSQL test suite. You can specify your local database settings in LOCAL_PyGreSQL.py. """ try: import unittest2 as unittest # for Python < 2.7 except ImportError: import unittest if not (hasattr(unittest, 'skip') and hasattr(unittest.TestCase, 'setUpClass') and hasattr(unittest.TestCase, 'skipTest') and hasattr(unittest.TestCase, 'assertIn')): raise ImportError('Please install a newer version of unittest') def discover(): loader = unittest.TestLoader() suite = loader.discover('.') return suitepygresql-5.1.2/tests/dbapi20.py000066400000000000000000000710201365010227600163240ustar00rootroot00000000000000#!/usr/bin/python '''Python DB API 2.0 driver compliance unit test suite. This software is Public Domain and may be used without restrictions. ''' __version__ = '1.5' try: import unittest2 as unittest # for Python < 2.7 except ImportError: import unittest import time class DatabaseAPI20Test(unittest.TestCase): ''' Test a database self.driver for DB API 2.0 compatibility. This implementation tests Gadfly, but the TestCase is structured so that other self.drivers can subclass this test case to ensure compiliance with the DB-API. It is expected that this TestCase may be expanded in the future if ambiguities or edge conditions are discovered. The 'Optional Extensions' are not yet being tested. self.drivers should subclass this test, overriding setUp, tearDown, self.driver, connect_args and connect_kw_args. Class specification should be as follows: import dbapi20 class mytest(dbapi20.DatabaseAPI20Test): [...] Don't 'import DatabaseAPI20Test from dbapi20', or you will confuse the unit tester - just 'import dbapi20'. ''' # The self.driver module. 
This should be the module where the 'connect' # method is to be found driver = None connect_args = () # List of arguments to pass to connect connect_kw_args = {} # Keyword arguments for connect table_prefix = 'dbapi20test_' # If you need to specify a prefix for tables ddl1 = 'create table %sbooze (name varchar(20))' % table_prefix ddl2 = 'create table %sbarflys (name varchar(20))' % table_prefix xddl1 = 'drop table %sbooze' % table_prefix xddl2 = 'drop table %sbarflys' % table_prefix lowerfunc = 'lower' # Name of stored procedure to convert string->lowercase # Some drivers may need to override these helpers, for example adding # a 'commit' after the execute. def executeDDL1(self,cursor): cursor.execute(self.ddl1) def executeDDL2(self,cursor): cursor.execute(self.ddl2) def setUp(self): """self.drivers should override this method to perform required setup if any is necessary, such as creating the database. """ pass def tearDown(self): """self.drivers should override this method to perform required cleanup if any is necessary, such as deleting the test database. The default drops the tables that may be created. """ con = self._connect() try: cur = con.cursor() for ddl in (self.xddl1,self.xddl2): try: cur.execute(ddl) con.commit() except self.driver.Error: # Assume table didn't exist. Other tests will check if # execute is busted. pass finally: con.close() def _connect(self): try: return self.driver.connect( *self.connect_args,**self.connect_kw_args ) except AttributeError: self.fail("No connect method found in self.driver module") def test_connect(self): con = self._connect() con.close() def test_apilevel(self): try: # Must exist apilevel = self.driver.apilevel # Must equal 2.0 self.assertEqual(apilevel,'2.0') except AttributeError: self.fail("Driver doesn't define apilevel") def test_threadsafety(self): try: # Must exist threadsafety = self.driver.threadsafety # Must be a valid value self.assertTrue(threadsafety in (0,1,2,3)) except AttributeError: self.fail("Driver doesn't define threadsafety") def test_paramstyle(self): try: # Must exist paramstyle = self.driver.paramstyle # Must be a valid value self.assertTrue(paramstyle in ( 'qmark','numeric','named','format','pyformat' )) except AttributeError: self.fail("Driver doesn't define paramstyle") def test_Exceptions(self): """Make sure required exceptions exist, and are in the defined hierarchy. """ self.assertTrue(issubclass(self.driver.Warning,Exception)) self.assertTrue(issubclass(self.driver.Error,Exception)) self.assertTrue( issubclass(self.driver.InterfaceError,self.driver.Error) ) self.assertTrue( issubclass(self.driver.DatabaseError,self.driver.Error) ) self.assertTrue( issubclass(self.driver.OperationalError,self.driver.Error) ) self.assertTrue( issubclass(self.driver.IntegrityError,self.driver.Error) ) self.assertTrue( issubclass(self.driver.InternalError,self.driver.Error) ) self.assertTrue( issubclass(self.driver.ProgrammingError,self.driver.Error) ) self.assertTrue( issubclass(self.driver.NotSupportedError,self.driver.Error) ) def test_ExceptionsAsConnectionAttributes(self): """Optional extension Test for the optional DB API 2.0 extension, where the exceptions are exposed as attributes on the Connection object I figure this optional extension will be implemented by any driver author who is using this test suite, so it is enabled by default. 
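        A minimal sketch of what this extension allows (the connection
        arguments and the failing query are assumptions):

            con = driver.connect(database='test')
            try:
                cur = con.cursor()
                cur.execute("select 1/0")  # division by zero error
            except con.DatabaseError:      # no driver import needed here
                con.rollback()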
""" con = self._connect() drv = self.driver self.assertTrue(con.Warning is drv.Warning) self.assertTrue(con.Error is drv.Error) self.assertTrue(con.InterfaceError is drv.InterfaceError) self.assertTrue(con.DatabaseError is drv.DatabaseError) self.assertTrue(con.OperationalError is drv.OperationalError) self.assertTrue(con.IntegrityError is drv.IntegrityError) self.assertTrue(con.InternalError is drv.InternalError) self.assertTrue(con.ProgrammingError is drv.ProgrammingError) self.assertTrue(con.NotSupportedError is drv.NotSupportedError) def test_commit(self): con = self._connect() try: # Commit must work, even if it doesn't do anything con.commit() finally: con.close() def test_rollback(self): con = self._connect() # If rollback is defined, it should either work or throw # the documented exception if hasattr(con,'rollback'): try: con.rollback() except self.driver.NotSupportedError: pass def test_cursor(self): con = self._connect() try: cur = con.cursor() finally: con.close() def test_cursor_isolation(self): con = self._connect() try: # Make sure cursors created from the same connection have # the documented transaction isolation level cur1 = con.cursor() cur2 = con.cursor() self.executeDDL1(cur1) cur1.execute("insert into %sbooze values ('Victoria Bitter')" % ( self.table_prefix )) cur2.execute("select name from %sbooze" % self.table_prefix) booze = cur2.fetchall() self.assertEqual(len(booze),1) self.assertEqual(len(booze[0]),1) self.assertEqual(booze[0][0],'Victoria Bitter') finally: con.close() def test_description(self): con = self._connect() try: cur = con.cursor() self.executeDDL1(cur) self.assertEqual(cur.description,None, 'cursor.description should be none after executing a ' 'statement that can return no rows (such as DDL)' ) cur.execute('select name from %sbooze' % self.table_prefix) self.assertEqual(len(cur.description),1, 'cursor.description describes too many columns' ) self.assertEqual(len(cur.description[0]),7, 'cursor.description[x] tuples must have 7 elements' ) self.assertEqual(cur.description[0][0].lower(),'name', 'cursor.description[x][0] must return column name' ) self.assertEqual(cur.description[0][1],self.driver.STRING, 'cursor.description[x][1] must return column type. Got %r' % cur.description[0][1] ) # Make sure self.description gets reset self.executeDDL2(cur) self.assertEqual(cur.description,None, 'cursor.description not being set to None when executing ' 'no-result statements (eg. 
DDL)' ) finally: con.close() def test_rowcount(self): con = self._connect() try: cur = con.cursor() self.executeDDL1(cur) self.assertEqual(cur.rowcount,-1, 'cursor.rowcount should be -1 after executing no-result ' 'statements' ) cur.execute("insert into %sbooze values ('Victoria Bitter')" % ( self.table_prefix )) self.assertTrue(cur.rowcount in (-1,1), 'cursor.rowcount should == number or rows inserted, or ' 'set to -1 after executing an insert statement' ) cur.execute("select name from %sbooze" % self.table_prefix) self.assertTrue(cur.rowcount in (-1,1), 'cursor.rowcount should == number of rows returned, or ' 'set to -1 after executing a select statement' ) self.executeDDL2(cur) self.assertEqual(cur.rowcount,-1, 'cursor.rowcount not being reset to -1 after executing ' 'no-result statements' ) finally: con.close() lower_func = 'lower' def test_callproc(self): con = self._connect() try: cur = con.cursor() if self.lower_func and hasattr(cur,'callproc'): r = cur.callproc(self.lower_func,('FOO',)) self.assertEqual(len(r),1) self.assertEqual(r[0],'FOO') r = cur.fetchall() self.assertEqual(len(r),1,'callproc produced no result set') self.assertEqual(len(r[0]),1, 'callproc produced invalid result set' ) self.assertEqual(r[0][0],'foo', 'callproc produced invalid results' ) finally: con.close() def test_close(self): con = self._connect() try: cur = con.cursor() finally: con.close() # cursor.execute should raise an Error if called after connection # closed self.assertRaises(self.driver.Error,self.executeDDL1,cur) # connection.commit should raise an Error if called after connection' # closed.' self.assertRaises(self.driver.Error,con.commit) # connection.close should raise an Error if called more than once self.assertRaises(self.driver.Error,con.close) def test_execute(self): con = self._connect() try: cur = con.cursor() self._paraminsert(cur) finally: con.close() def _paraminsert(self,cur): self.executeDDL1(cur) cur.execute("insert into %sbooze values ('Victoria Bitter')" % ( self.table_prefix )) self.assertTrue(cur.rowcount in (-1,1)) if self.driver.paramstyle == 'qmark': cur.execute( 'insert into %sbooze values (?)' % self.table_prefix, ("Cooper's",) ) elif self.driver.paramstyle == 'numeric': cur.execute( 'insert into %sbooze values (:1)' % self.table_prefix, ("Cooper's",) ) elif self.driver.paramstyle == 'named': cur.execute( 'insert into %sbooze values (:beer)' % self.table_prefix, {'beer':"Cooper's"} ) elif self.driver.paramstyle == 'format': cur.execute( 'insert into %sbooze values (%%s)' % self.table_prefix, ("Cooper's",) ) elif self.driver.paramstyle == 'pyformat': cur.execute( 'insert into %sbooze values (%%(beer)s)' % self.table_prefix, {'beer':"Cooper's"} ) else: self.fail('Invalid paramstyle') self.assertTrue(cur.rowcount in (-1,1)) cur.execute('select name from %sbooze' % self.table_prefix) res = cur.fetchall() self.assertEqual(len(res),2,'cursor.fetchall returned too few rows') beers = [res[0][0],res[1][0]] beers.sort() self.assertEqual(beers[0],"Cooper's", 'cursor.fetchall retrieved incorrect data, or data inserted ' 'incorrectly' ) self.assertEqual(beers[1],"Victoria Bitter", 'cursor.fetchall retrieved incorrect data, or data inserted ' 'incorrectly' ) def test_executemany(self): con = self._connect() try: cur = con.cursor() self.executeDDL1(cur) largs = [ ("Cooper's",) , ("Boag's",) ] margs = [ {'beer': "Cooper's"}, {'beer': "Boag's"} ] if self.driver.paramstyle == 'qmark': cur.executemany( 'insert into %sbooze values (?)' % self.table_prefix, largs ) elif self.driver.paramstyle == 
'numeric': cur.executemany( 'insert into %sbooze values (:1)' % self.table_prefix, largs ) elif self.driver.paramstyle == 'named': cur.executemany( 'insert into %sbooze values (:beer)' % self.table_prefix, margs ) elif self.driver.paramstyle == 'format': cur.executemany( 'insert into %sbooze values (%%s)' % self.table_prefix, largs ) elif self.driver.paramstyle == 'pyformat': cur.executemany( 'insert into %sbooze values (%%(beer)s)' % ( self.table_prefix ), margs ) else: self.fail('Unknown paramstyle') self.assertTrue(cur.rowcount in (-1,2), 'insert using cursor.executemany set cursor.rowcount to ' 'incorrect value %r' % cur.rowcount ) cur.execute('select name from %sbooze' % self.table_prefix) res = cur.fetchall() self.assertEqual(len(res),2, 'cursor.fetchall retrieved incorrect number of rows' ) beers = [res[0][0],res[1][0]] beers.sort() self.assertEqual(beers[0],"Boag's",'incorrect data retrieved') self.assertEqual(beers[1],"Cooper's",'incorrect data retrieved') finally: con.close() def test_fetchone(self): con = self._connect() try: cur = con.cursor() # cursor.fetchone should raise an Error if called before # executing a select-type query self.assertRaises(self.driver.Error,cur.fetchone) # cursor.fetchone should raise an Error if called after # executing a query that cannnot return rows self.executeDDL1(cur) self.assertRaises(self.driver.Error,cur.fetchone) cur.execute('select name from %sbooze' % self.table_prefix) self.assertEqual(cur.fetchone(),None, 'cursor.fetchone should return None if a query retrieves ' 'no rows' ) self.assertTrue(cur.rowcount in (-1,0)) # cursor.fetchone should raise an Error if called after # executing a query that cannnot return rows cur.execute("insert into %sbooze values ('Victoria Bitter')" % ( self.table_prefix )) self.assertRaises(self.driver.Error,cur.fetchone) cur.execute('select name from %sbooze' % self.table_prefix) r = cur.fetchone() self.assertEqual(len(r),1, 'cursor.fetchone should have retrieved a single row' ) self.assertEqual(r[0],'Victoria Bitter', 'cursor.fetchone retrieved incorrect data' ) self.assertEqual(cur.fetchone(),None, 'cursor.fetchone should return None if no more rows available' ) self.assertTrue(cur.rowcount in (-1,1)) finally: con.close() samples = [ 'Carlton Cold', 'Carlton Draft', 'Mountain Goat', 'Redback', 'Victoria Bitter', 'XXXX' ] def _populate(self): """Return a list of sql commands to setup the DB for the fetch tests. """ populate = [ "insert into %sbooze values ('%s')" % (self.table_prefix,s) for s in self.samples ] return populate def test_fetchmany(self): con = self._connect() try: cur = con.cursor() # cursor.fetchmany should raise an Error if called without #issuing a query self.assertRaises(self.driver.Error,cur.fetchmany,4) self.executeDDL1(cur) for sql in self._populate(): cur.execute(sql) cur.execute('select name from %sbooze' % self.table_prefix) r = cur.fetchmany() self.assertEqual(len(r),1, 'cursor.fetchmany retrieved incorrect number of rows, ' 'default of arraysize is one.' 
) cur.arraysize=10 r = cur.fetchmany(3) # Should get 3 rows self.assertEqual(len(r),3, 'cursor.fetchmany retrieved incorrect number of rows' ) r = cur.fetchmany(4) # Should get 2 more self.assertEqual(len(r),2, 'cursor.fetchmany retrieved incorrect number of rows' ) r = cur.fetchmany(4) # Should be an empty sequence self.assertEqual(len(r),0, 'cursor.fetchmany should return an empty sequence after ' 'results are exhausted' ) self.assertTrue(cur.rowcount in (-1,6)) # Same as above, using cursor.arraysize cur.arraysize=4 cur.execute('select name from %sbooze' % self.table_prefix) r = cur.fetchmany() # Should get 4 rows self.assertEqual(len(r),4, 'cursor.arraysize not being honoured by fetchmany' ) r = cur.fetchmany() # Should get 2 more self.assertEqual(len(r),2) r = cur.fetchmany() # Should be an empty sequence self.assertEqual(len(r),0) self.assertTrue(cur.rowcount in (-1,6)) cur.arraysize=6 cur.execute('select name from %sbooze' % self.table_prefix) rows = cur.fetchmany() # Should get all rows self.assertTrue(cur.rowcount in (-1,6)) self.assertEqual(len(rows),6) self.assertEqual(len(rows),6) rows = [r[0] for r in rows] rows.sort() # Make sure we get the right data back out for i in range(0,6): self.assertEqual(rows[i],self.samples[i], 'incorrect data retrieved by cursor.fetchmany' ) rows = cur.fetchmany() # Should return an empty list self.assertEqual(len(rows),0, 'cursor.fetchmany should return an empty sequence if ' 'called after the whole result set has been fetched' ) self.assertTrue(cur.rowcount in (-1,6)) self.executeDDL2(cur) cur.execute('select name from %sbarflys' % self.table_prefix) r = cur.fetchmany() # Should get empty sequence self.assertEqual(len(r),0, 'cursor.fetchmany should return an empty sequence if ' 'query retrieved no rows' ) self.assertTrue(cur.rowcount in (-1,0)) finally: con.close() def test_fetchall(self): con = self._connect() try: cur = con.cursor() # cursor.fetchall should raise an Error if called # without executing a query that may return rows (such # as a select) self.assertRaises(self.driver.Error, cur.fetchall) self.executeDDL1(cur) for sql in self._populate(): cur.execute(sql) # cursor.fetchall should raise an Error if called # after executing a a statement that cannot return rows self.assertRaises(self.driver.Error,cur.fetchall) cur.execute('select name from %sbooze' % self.table_prefix) rows = cur.fetchall() self.assertTrue(cur.rowcount in (-1,len(self.samples))) self.assertEqual(len(rows),len(self.samples), 'cursor.fetchall did not retrieve all rows' ) rows = [r[0] for r in rows] rows.sort() for i in range(0,len(self.samples)): self.assertEqual(rows[i],self.samples[i], 'cursor.fetchall retrieved incorrect rows' ) rows = cur.fetchall() self.assertEqual( len(rows),0, 'cursor.fetchall should return an empty list if called ' 'after the whole result set has been fetched' ) self.assertTrue(cur.rowcount in (-1,len(self.samples))) self.executeDDL2(cur) cur.execute('select name from %sbarflys' % self.table_prefix) rows = cur.fetchall() self.assertTrue(cur.rowcount in (-1,0)) self.assertEqual(len(rows),0, 'cursor.fetchall should return an empty list if ' 'a select query returns no rows' ) finally: con.close() def test_mixedfetch(self): con = self._connect() try: cur = con.cursor() self.executeDDL1(cur) for sql in self._populate(): cur.execute(sql) cur.execute('select name from %sbooze' % self.table_prefix) rows1 = cur.fetchone() rows23 = cur.fetchmany(2) rows4 = cur.fetchone() rows56 = cur.fetchall() self.assertTrue(cur.rowcount in (-1,6)) 
self.assertEqual(len(rows23),2, 'fetchmany returned incorrect number of rows' ) self.assertEqual(len(rows56),2, 'fetchall returned incorrect number of rows' ) rows = [rows1[0]] rows.extend([rows23[0][0],rows23[1][0]]) rows.append(rows4[0]) rows.extend([rows56[0][0],rows56[1][0]]) rows.sort() for i in range(0,len(self.samples)): self.assertEqual(rows[i],self.samples[i], 'incorrect data retrieved or inserted' ) finally: con.close() def help_nextset_setUp(self, cur): """Should create a procedure called deleteme that returns two result sets, first the number of rows in booze then "name from booze" """ if False: sql = """ create procedure deleteme as begin select count(*) from booze select name from booze end """ cur.execute(sql) else: raise NotImplementedError('Helper not implemented') def help_nextset_tearDown(self, cur): """If cleaning up is needed after nextSetTest""" if False: cur.execute("drop procedure deleteme") else: raise NotImplementedError('Helper not implemented') def test_nextset(self): con = self._connect() try: cur = con.cursor() if not hasattr(cur,'nextset'): return try: self.executeDDL1(cur) sql=self._populate() for sql in self._populate(): cur.execute(sql) self.help_nextset_setUp(cur) cur.callproc('deleteme') numberofrows=cur.fetchone() assert numberofrows[0]== len(self.samples) assert cur.nextset() names=cur.fetchall() assert len(names) == len(self.samples) s=cur.nextset() assert s == None,'No more return sets, should return None' finally: self.help_nextset_tearDown(cur) finally: con.close() def test_arraysize(self): """Not much here - rest of the tests for this are in test_fetchmany""" con = self._connect() try: cur = con.cursor() self.assertTrue(hasattr(cur,'arraysize'), 'cursor.arraysize must be defined' ) finally: con.close() def test_setinputsizes(self): con = self._connect() try: cur = con.cursor() cur.setinputsizes( (25,) ) self._paraminsert(cur) # Make sure cursor still works finally: con.close() def test_setoutputsize_basic(self): """Basic test is to make sure setoutputsize doesn't blow up""" con = self._connect() try: cur = con.cursor() cur.setoutputsize(1000) cur.setoutputsize(2000,0) self._paraminsert(cur) # Make sure the cursor still works finally: con.close() def test_setoutputsize(self): """Real test for setoutputsize is driver dependant""" raise NotImplementedError('Driver needs to override this test') def test_None(self): con = self._connect() try: cur = con.cursor() self.executeDDL1(cur) cur.execute('insert into %sbooze values (NULL)' % self.table_prefix) cur.execute('select name from %sbooze' % self.table_prefix) r = cur.fetchall() self.assertEqual(len(r),1) self.assertEqual(len(r[0]),1) self.assertEqual(r[0][0],None,'NULL value not returned as None') finally: con.close() def test_Date(self): d1 = self.driver.Date(2002,12,25) d2 = self.driver.DateFromTicks(time.mktime((2002,12,25,0,0,0,0,0,0))) # Can we assume this? API doesn't specify, but it seems implied # self.assertEqual(str(d1),str(d2)) def test_Time(self): t1 = self.driver.Time(13,45,30) t2 = self.driver.TimeFromTicks(time.mktime((2001,1,1,13,45,30,0,0,0))) # Can we assume this? API doesn't specify, but it seems implied # self.assertEqual(str(t1),str(t2)) def test_Timestamp(self): t1 = self.driver.Timestamp(2002,12,25,13,45,30) t2 = self.driver.TimestampFromTicks( time.mktime((2002,12,25,13,45,30,0,0,0)) ) # Can we assume this? 
API doesn't specify, but it seems implied # self.assertEqual(str(t1),str(t2)) def test_Binary(self): b = self.driver.Binary(b'Something') b = self.driver.Binary(b'') def test_STRING(self): self.assertTrue(hasattr(self.driver,'STRING'), 'module.STRING must be defined' ) def test_BINARY(self): self.assertTrue(hasattr(self.driver,'BINARY'), 'module.BINARY must be defined.' ) def test_NUMBER(self): self.assertTrue(hasattr(self.driver,'NUMBER'), 'module.NUMBER must be defined.' ) def test_DATETIME(self): self.assertTrue(hasattr(self.driver,'DATETIME'), 'module.DATETIME must be defined.' ) def test_ROWID(self): self.assertTrue(hasattr(self.driver,'ROWID'), 'module.ROWID must be defined.' ) pygresql-5.1.2/tests/test_classic.py000077500000000000000000000270711365010227600175750ustar00rootroot00000000000000#!/usr/bin/python # -*- coding: utf-8 -*- from __future__ import print_function try: import unittest2 as unittest # for Python < 2.7 except ImportError: import unittest import sys from functools import partial from time import sleep from threading import Thread from pg import * # We need a database to test against. If LOCAL_PyGreSQL.py exists we will # get our information from that. Otherwise we use the defaults. dbname = 'unittest' dbhost = None dbport = 5432 try: from .LOCAL_PyGreSQL import * except (ImportError, ValueError): try: from LOCAL_PyGreSQL import * except ImportError: pass def opendb(): db = DB(dbname, dbhost, dbport) db.query("SET DATESTYLE TO 'ISO'") db.query("SET TIME ZONE 'EST5EDT'") db.query("SET DEFAULT_WITH_OIDS=FALSE") db.query("SET CLIENT_MIN_MESSAGES=WARNING") db.query("SET STANDARD_CONFORMING_STRINGS=FALSE") return db db = opendb() for q in ( "DROP TABLE _test1._test_schema", "DROP TABLE _test2._test_schema", "DROP SCHEMA _test1", "DROP SCHEMA _test2", ): try: db.query(q) except Exception: pass db.close() class UtilityTest(unittest.TestCase): def setUp(self): """Setup test tables or empty them if they already exist.""" db = opendb() for t in ('_test1', '_test2'): try: db.query("CREATE SCHEMA " + t) except Error: pass try: db.query("CREATE TABLE %s._test_schema " "(%s int PRIMARY KEY)" % (t, t)) except Error: db.query("DELETE FROM %s._test_schema" % t) try: db.query("CREATE TABLE _test_schema " "(_test int PRIMARY KEY, _i interval, dvar int DEFAULT 999)") except Error: db.query("DELETE FROM _test_schema") try: db.query("CREATE VIEW _test_vschema AS " "SELECT _test, 'abc'::text AS _test2 FROM _test_schema") except Error: pass def test_invalidname(self): """Make sure that invalid table names are caught""" db = opendb() self.assertRaises(NotSupportedError, db.get_attnames, 'x.y.z') def test_schema(self): """Does it differentiate the same table name in different schemas""" db = opendb() # see if they differentiate the table names properly self.assertEqual( db.get_attnames('_test_schema'), {'_test': 'int', '_i': 'date', 'dvar': 'int'} ) self.assertEqual( db.get_attnames('public._test_schema'), {'_test': 'int', '_i': 'date', 'dvar': 'int'} ) self.assertEqual( db.get_attnames('_test1._test_schema'), {'_test1': 'int'} ) self.assertEqual( db.get_attnames('_test2._test_schema'), {'_test2': 'int'} ) def test_pkey(self): db = opendb() self.assertEqual(db.pkey('_test_schema'), '_test') self.assertEqual(db.pkey('public._test_schema'), '_test') self.assertEqual(db.pkey('_test1._test_schema'), '_test1') self.assertEqual(db.pkey('_test2._test_schema'), '_test2') self.assertRaises(KeyError, db.pkey, '_test_vschema') def test_get(self): db = opendb() db.query("INSERT INTO _test_schema VALUES 
(1234)") db.get('_test_schema', 1234) db.get('_test_schema', 1234, keyname='_test') self.assertRaises(ProgrammingError, db.get, '_test_vschema', 1234) db.get('_test_vschema', 1234, keyname='_test') def test_params(self): db = opendb() db.query("INSERT INTO _test_schema VALUES ($1, $2, $3)", 12, None, 34) d = db.get('_test_schema', 12) self.assertEqual(d['dvar'], 34) def test_insert(self): db = opendb() d = dict(_test=1234) db.insert('_test_schema', d) self.assertEqual(d['dvar'], 999) db.insert('_test_schema', _test=1235) self.assertEqual(d['dvar'], 999) def test_context_manager(self): db = opendb() t = '_test_schema' d = dict(_test=1235) with db: db.insert(t, d) d['_test'] += 1 db.insert(t, d) try: with db: d['_test'] += 1 db.insert(t, d) db.insert(t, d) except IntegrityError: pass with db: d['_test'] += 1 db.insert(t, d) d['_test'] += 1 db.insert(t, d) self.assertTrue(db.get(t, 1235)) self.assertTrue(db.get(t, 1236)) self.assertRaises(DatabaseError, db.get, t, 1237) self.assertTrue(db.get(t, 1238)) self.assertTrue(db.get(t, 1239)) def test_sqlstate(self): db = opendb() db.query("INSERT INTO _test_schema VALUES (1234)") try: db.query("INSERT INTO _test_schema VALUES (1234)") except DatabaseError as error: self.assertTrue(isinstance(error, IntegrityError)) # the SQLSTATE error code for unique violation is 23505 self.assertEqual(error.sqlstate, '23505') def test_mixed_case(self): db = opendb() try: db.query('CREATE TABLE _test_mc ("_Test" int PRIMARY KEY)') except Error: db.query("DELETE FROM _test_mc") d = dict(_Test=1234) db.insert('_test_mc', d) def test_update(self): db = opendb() db.query("INSERT INTO _test_schema VALUES (1234)") r = db.get('_test_schema', 1234) r['dvar'] = 123 db.update('_test_schema', r) r = db.get('_test_schema', 1234) self.assertEqual(r['dvar'], 123) r = db.get('_test_schema', 1234) self.assertIn('dvar', r) db.update('_test_schema', _test=1234, dvar=456) r = db.get('_test_schema', 1234) self.assertEqual(r['dvar'], 456) r = db.get('_test_schema', 1234) db.update('_test_schema', r, dvar=456) r = db.get('_test_schema', 1234) self.assertEqual(r['dvar'], 456) def notify_callback(self, arg_dict): if arg_dict: arg_dict['called'] = True else: self.notify_timeout = True def test_notify(self, options=None): if not options: options = {} run_as_method = options.get('run_as_method') call_notify = options.get('call_notify') two_payloads = options.get('two_payloads') db = opendb() # Get function under test, can be standalone or DB method. fut = db.notification_handler if run_as_method else partial( NotificationHandler, db) arg_dict = dict(event=None, called=False) self.notify_timeout = False # Listen for 'event_1'. target = fut('event_1', self.notify_callback, arg_dict, 5) thread = Thread(None, target) thread.start() try: # Wait until the thread has started. for n in range(500): if target.listening: break sleep(0.01) self.assertTrue(target.listening) self.assertTrue(thread.is_alive()) # Open another connection for sending notifications. db2 = opendb() # Generate notification from the other connection. if two_payloads: db2.begin() if call_notify: if two_payloads: target.notify(db2, payload='payload 0') target.notify(db2, payload='payload 1') else: if two_payloads: db2.query("notify event_1, 'payload 0'") db2.query("notify event_1, 'payload 1'") if two_payloads: db2.commit() # Wait until the notification has been caught. for n in range(500): if arg_dict['called'] or self.notify_timeout: break sleep(0.01) # Check that callback has been invoked. 
self.assertTrue(arg_dict['called']) self.assertEqual(arg_dict['event'], 'event_1') self.assertEqual(arg_dict['extra'], 'payload 1') self.assertTrue(isinstance(arg_dict['pid'], int)) self.assertFalse(self.notify_timeout) arg_dict['called'] = False self.assertTrue(thread.is_alive()) # Generate stop notification. if call_notify: target.notify(db2, stop=True, payload='payload 2') else: db2.query("notify stop_event_1, 'payload 2'") db2.close() # Wait until the notification has been caught. for n in range(500): if arg_dict['called'] or self.notify_timeout: break sleep(0.01) # Check that callback has been invoked. self.assertTrue(arg_dict['called']) self.assertEqual(arg_dict['event'], 'stop_event_1') self.assertEqual(arg_dict['extra'], 'payload 2') self.assertTrue(isinstance(arg_dict['pid'], int)) self.assertFalse(self.notify_timeout) thread.join(5) self.assertFalse(thread.is_alive()) self.assertFalse(target.listening) target.close() except Exception: target.close() if thread.is_alive(): thread.join(5) def test_notify_other_options(self): for run_as_method in False, True: for call_notify in False, True: for two_payloads in False, True: options = dict( run_as_method=run_as_method, call_notify=call_notify, two_payloads=two_payloads) if any(options.values()): self.test_notify(options) def test_notify_timeout(self): for run_as_method in False, True: db = opendb() # Get function under test, can be standalone or DB method. fut = db.notification_handler if run_as_method else partial( NotificationHandler, db) arg_dict = dict(event=None, called=False) self.notify_timeout = False # Listen for 'event_1' with timeout of 10ms. target = fut('event_1', self.notify_callback, arg_dict, 0.01) thread = Thread(None, target) thread.start() # Sleep 20ms, long enough to time out. sleep(0.02) # Verify that we've indeed timed out. self.assertFalse(arg_dict.get('called')) self.assertTrue(self.notify_timeout) self.assertFalse(thread.is_alive()) self.assertFalse(target.listening) target.close() if __name__ == '__main__': if len(sys.argv) == 2 and sys.argv[1] == '-l': print('\n'.join(unittest.getTestCaseNames(UtilityTest, 'test_'))) sys.exit(0) test_list = [name for name in sys.argv[1:] if not name.startswith('-')] if not test_list: test_list = unittest.getTestCaseNames(UtilityTest, 'test_') suite = unittest.TestSuite() for test_name in test_list: try: suite.addTest(UtilityTest(test_name)) except Exception: print("\n ERROR: %s.\n" % sys.exc_value) sys.exit(1) verbosity = '-v' in sys.argv[1:] and 2 or 1 failfast = '-l' in sys.argv[1:] runner = unittest.TextTestRunner(verbosity=verbosity, failfast=failfast) rc = runner.run(suite) sys.exit(1 if rc.errors or rc.failures else 0) pygresql-5.1.2/tests/test_classic_connection.py000077500000000000000000002577331365010227600220260ustar00rootroot00000000000000#!/usr/bin/python # -*- coding: utf-8 -*- """Test the classic PyGreSQL interface. Sub-tests for the low-level connection object. Contributed by Christoph Zwerschke. These tests need a database to test against. """ try: import unittest2 as unittest # for Python < 2.7 except ImportError: import unittest import threading import time import os from collections import namedtuple try: from collections.abc import Iterable except ImportError: from collections import Iterable from decimal import Decimal import pg # the module under test # We need a database to test against. If LOCAL_PyGreSQL.py exists we will # get our information from that. Otherwise we use the defaults. 
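# A minimal LOCAL_PyGreSQL.py sketch (the values are assumptions; the
# variable names are the ones picked up below):
#
#     dbname = 'unittest'
#     dbhost = 'localhost'
#     dbport = 5432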
# These tests should be run with various PostgreSQL versions and databases # created with different encodings and locales. Particularly, make sure the # tests are running against databases created with both SQL_ASCII and UTF8. dbname = 'unittest' dbhost = None dbport = 5432 try: from .LOCAL_PyGreSQL import * except (ImportError, ValueError): try: from LOCAL_PyGreSQL import * except ImportError: pass try: # noinspection PyUnresolvedReferences long except NameError: # Python >= 3.0 long = int try: # noinspection PyUnresolvedReferences unicode except NameError: # Python >= 3.0 unicode = str unicode_strings = str is not bytes windows = os.name == 'nt' # There is a known a bug in libpq under Windows which can cause # the interface to crash when calling PQhost(): do_not_ask_for_host = windows do_not_ask_for_host_reason = 'libpq issue on Windows' def connect(): """Create a basic pg connection to the test database.""" connection = pg.connect(dbname, dbhost, dbport) connection.query("set client_min_messages=warning") return connection class TestCanConnect(unittest.TestCase): """Test whether a basic connection to PostgreSQL is possible.""" def testCanConnect(self): try: connection = connect() except pg.Error as error: self.fail('Cannot connect to database %s:\n%s' % (dbname, error)) try: connection.close() except pg.Error: self.fail('Cannot close the database connection') class TestConnectObject(unittest.TestCase): """Test existence of basic pg connection methods.""" def setUp(self): self.connection = connect() def tearDown(self): try: self.connection.close() except pg.InternalError: pass def is_method(self, attribute): """Check if given attribute on the connection is a method.""" if do_not_ask_for_host and attribute == 'host': return False return callable(getattr(self.connection, attribute)) def testClassName(self): self.assertEqual(self.connection.__class__.__name__, 'Connection') def testModuleName(self): self.assertEqual(self.connection.__class__.__module__, 'pg') def testStr(self): r = str(self.connection) self.assertTrue(r.startswith(' 5: break r = self.connection.cancel() # cancel the running query thread.join() # wait for the thread to end t2 = time.time() self.assertIsInstance(r, int) self.assertEqual(r, 1) # return code should be 1 self.assertLessEqual(t2 - t1, 3) # time should be under 3 seconds self.assertTrue(errors) def testMethodFileNo(self): r = self.connection.fileno() self.assertIsInstance(r, int) self.assertGreaterEqual(r, 0) def testMethodTransaction(self): transaction = self.connection.transaction self.assertRaises(TypeError, transaction, None) self.assertEqual(transaction(), pg.TRANS_IDLE) self.connection.query('begin') self.assertEqual(transaction(), pg.TRANS_INTRANS) self.connection.query('rollback') self.assertEqual(transaction(), pg.TRANS_IDLE) def testMethodParameter(self): parameter = self.connection.parameter query = self.connection.query self.assertRaises(TypeError, parameter) r = parameter('this server setting does not exist') self.assertIsNone(r) s = query('show server_version').getresult()[0][0] self.assertIsNotNone(s) r = parameter('server_version') self.assertEqual(r, s) s = query('show server_encoding').getresult()[0][0] self.assertIsNotNone(s) r = parameter('server_encoding') self.assertEqual(r, s) s = query('show client_encoding').getresult()[0][0] self.assertIsNotNone(s) r = parameter('client_encoding') self.assertEqual(r, s) s = query('show server_encoding').getresult()[0][0] self.assertIsNotNone(s) r = parameter('server_encoding') self.assertEqual(r, s) class 
TestSimpleQueries(unittest.TestCase): """Test simple queries via a basic pg connection.""" def setUp(self): self.c = connect() def tearDown(self): self.doCleanups() self.c.close() def testClassName(self): r = self.c.query("select 1") self.assertEqual(r.__class__.__name__, 'Query') def testModuleName(self): r = self.c.query("select 1") self.assertEqual(r.__class__.__module__, 'pg') def testStr(self): q = ("select 1 as a, 'hello' as h, 'w' as world" " union select 2, 'xyz', 'uvw'") r = self.c.query(q) self.assertEqual(str(r), 'a| h |world\n' '-+-----+-----\n' '1|hello|w \n' '2|xyz |uvw \n' '(2 rows)') def testRepr(self): r = repr(self.c.query("select 1")) self.assertTrue(r.startswith('= 120000: self.skipTest("database does not support tables with oids") query = self.c.query query("drop table if exists test_table") self.addCleanup(query, "drop table test_table") q = "create table test_table (n integer) with oids" r = query(q) self.assertIsNone(r) q = "insert into test_table values (1)" r = query(q) self.assertIsInstance(r, int) q = "insert into test_table select 2" r = query(q) self.assertIsInstance(r, int) oid = r q = "select oid from test_table where n=2" r = query(q).getresult() self.assertEqual(len(r), 1) r = r[0] self.assertEqual(len(r), 1) r = r[0] self.assertIsInstance(r, int) self.assertEqual(r, oid) q = "insert into test_table select 3 union select 4 union select 5" r = query(q) self.assertIsInstance(r, str) self.assertEqual(r, '3') q = "update test_table set n=4 where n<5" r = query(q) self.assertIsInstance(r, str) self.assertEqual(r, '4') q = "delete from test_table" r = query(q) self.assertIsInstance(r, str) self.assertEqual(r, '5') class TestUnicodeQueries(unittest.TestCase): """Test unicode strings as queries via a basic pg connection.""" def setUp(self): self.c = connect() self.c.query('set client_encoding=utf8') def tearDown(self): self.c.close() def testGetresulAscii(self): result = u'Hello, world!' q = u"select '%s'" % result v = self.c.query(q).getresult()[0][0] self.assertIsInstance(v, str) self.assertEqual(v, result) def testDictresulAscii(self): result = u'Hello, world!' q = u"select '%s' as greeting" % result v = self.c.query(q).dictresult()[0]['greeting'] self.assertIsInstance(v, str) self.assertEqual(v, result) def testGetresultUtf8(self): result = u'Hello, wörld & мир!' q = u"select '%s'" % result if not unicode_strings: result = result.encode('utf8') # pass the query as unicode try: v = self.c.query(q).getresult()[0][0] except(pg.DataError, pg.NotSupportedError): self.skipTest("database does not support utf8") self.assertIsInstance(v, str) self.assertEqual(v, result) q = q.encode('utf8') # pass the query as bytes v = self.c.query(q).getresult()[0][0] self.assertIsInstance(v, str) self.assertEqual(v, result) def testDictresultUtf8(self): result = u'Hello, wörld & мир!' q = u"select '%s' as greeting" % result if not unicode_strings: result = result.encode('utf8') try: v = self.c.query(q).dictresult()[0]['greeting'] except (pg.DataError, pg.NotSupportedError): self.skipTest("database does not support utf8") self.assertIsInstance(v, str) self.assertEqual(v, result) q = q.encode('utf8') v = self.c.query(q).dictresult()[0]['greeting'] self.assertIsInstance(v, str) self.assertEqual(v, result) def testDictresultLatin1(self): try: self.c.query('set client_encoding=latin1') except (pg.DataError, pg.NotSupportedError): self.skipTest("database does not support latin1") result = u'Hello, wörld!' 
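        # A minimal sketch (illustration only, not an executed test) of the
        # three row-access styles exercised by TestSimpleQueries above,
        # assuming a connection from this module's connect() helper:
        #
        #     con = connect()
        #     q = con.query("select 1 as n, 'one' as s")
        #     q.getresult()    # list of tuples:       [(1, 'one')]
        #     q.dictresult()   # list of dicts:        [{'n': 1, 's': 'one'}]
        #     q.namedresult()  # list of named tuples: [Row(n=1, s='one')]
        #     con.close()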
q = u"select '%s'" % result if not unicode_strings: result = result.encode('latin1') v = self.c.query(q).getresult()[0][0] self.assertIsInstance(v, str) self.assertEqual(v, result) q = q.encode('latin1') v = self.c.query(q).getresult()[0][0] self.assertIsInstance(v, str) self.assertEqual(v, result) def testDictresultLatin1(self): try: self.c.query('set client_encoding=latin1') except (pg.DataError, pg.NotSupportedError): self.skipTest("database does not support latin1") result = u'Hello, wörld!' q = u"select '%s' as greeting" % result if not unicode_strings: result = result.encode('latin1') v = self.c.query(q).dictresult()[0]['greeting'] self.assertIsInstance(v, str) self.assertEqual(v, result) q = q.encode('latin1') v = self.c.query(q).dictresult()[0]['greeting'] self.assertIsInstance(v, str) self.assertEqual(v, result) def testGetresultCyrillic(self): try: self.c.query('set client_encoding=iso_8859_5') except (pg.DataError, pg.NotSupportedError): self.skipTest("database does not support cyrillic") result = u'Hello, мир!' q = u"select '%s'" % result if not unicode_strings: result = result.encode('cyrillic') v = self.c.query(q).getresult()[0][0] self.assertIsInstance(v, str) self.assertEqual(v, result) q = q.encode('cyrillic') v = self.c.query(q).getresult()[0][0] self.assertIsInstance(v, str) self.assertEqual(v, result) def testDictresultCyrillic(self): try: self.c.query('set client_encoding=iso_8859_5') except (pg.DataError, pg.NotSupportedError): self.skipTest("database does not support cyrillic") result = u'Hello, мир!' q = u"select '%s' as greeting" % result if not unicode_strings: result = result.encode('cyrillic') v = self.c.query(q).dictresult()[0]['greeting'] self.assertIsInstance(v, str) self.assertEqual(v, result) q = q.encode('cyrillic') v = self.c.query(q).dictresult()[0]['greeting'] self.assertIsInstance(v, str) self.assertEqual(v, result) def testGetresultLatin9(self): try: self.c.query('set client_encoding=latin9') except (pg.DataError, pg.NotSupportedError): self.skipTest("database does not support latin9") result = u'smœrebrœd with pražská šunka (pay in ¢, £, €, or ¥)' q = u"select '%s'" % result if not unicode_strings: result = result.encode('latin9') v = self.c.query(q).getresult()[0][0] self.assertIsInstance(v, str) self.assertEqual(v, result) q = q.encode('latin9') v = self.c.query(q).getresult()[0][0] self.assertIsInstance(v, str) self.assertEqual(v, result) def testDictresultLatin9(self): try: self.c.query('set client_encoding=latin9') except (pg.DataError, pg.NotSupportedError): self.skipTest("database does not support latin9") result = u'smœrebrœd with pražská šunka (pay in ¢, £, €, or ¥)' q = u"select '%s' as menu" % result if not unicode_strings: result = result.encode('latin9') v = self.c.query(q).dictresult()[0]['menu'] self.assertIsInstance(v, str) self.assertEqual(v, result) q = q.encode('latin9') v = self.c.query(q).dictresult()[0]['menu'] self.assertIsInstance(v, str) self.assertEqual(v, result) class TestParamQueries(unittest.TestCase): """Test queries with parameters via a basic pg connection.""" def setUp(self): self.c = connect() self.c.query('set client_encoding=utf8') def tearDown(self): self.c.close() def testQueryWithNoneParam(self): self.assertRaises(TypeError, self.c.query, "select $1", None) self.assertRaises(TypeError, self.c.query, "select $1+$2", None, None) self.assertEqual(self.c.query("select $1::integer", (None,) ).getresult(), [(None,)]) self.assertEqual(self.c.query("select $1::text", [None] ).getresult(), [(None,)]) 
self.assertEqual(self.c.query("select $1::text", [[None]] ).getresult(), [(None,)]) def testQueryWithBoolParams(self, bool_enabled=None): query = self.c.query if bool_enabled is not None: bool_enabled_default = pg.get_bool() pg.set_bool(bool_enabled) try: bool_on = bool_enabled or bool_enabled is None v_false, v_true = (False, True) if bool_on else 'ft' r_false, r_true = [(v_false,)], [(v_true,)] self.assertEqual(query("select false").getresult(), r_false) self.assertEqual(query("select true").getresult(), r_true) q = "select $1::bool" self.assertEqual(query(q, (None,)).getresult(), [(None,)]) self.assertEqual(query(q, ('f',)).getresult(), r_false) self.assertEqual(query(q, ('t',)).getresult(), r_true) self.assertEqual(query(q, ('false',)).getresult(), r_false) self.assertEqual(query(q, ('true',)).getresult(), r_true) self.assertEqual(query(q, ('n',)).getresult(), r_false) self.assertEqual(query(q, ('y',)).getresult(), r_true) self.assertEqual(query(q, (0,)).getresult(), r_false) self.assertEqual(query(q, (1,)).getresult(), r_true) self.assertEqual(query(q, (False,)).getresult(), r_false) self.assertEqual(query(q, (True,)).getresult(), r_true) finally: if bool_enabled is not None: pg.set_bool(bool_enabled_default) def testQueryWithBoolParamsNotDefault(self): self.testQueryWithBoolParams(bool_enabled=not pg.get_bool()) def testQueryWithIntParams(self): query = self.c.query self.assertEqual(query("select 1+1").getresult(), [(2,)]) self.assertEqual(query("select 1+$1", (1,)).getresult(), [(2,)]) self.assertEqual(query("select 1+$1", [1]).getresult(), [(2,)]) self.assertEqual(query("select $1::integer", (2,)).getresult(), [(2,)]) self.assertEqual(query("select $1::text", (2,)).getresult(), [('2',)]) self.assertEqual(query("select 1+$1::numeric", [1]).getresult(), [(Decimal('2'),)]) self.assertEqual(query("select 1, $1::integer", (2,) ).getresult(), [(1, 2)]) self.assertEqual(query("select 1 union select $1::integer", (2,) ).getresult(), [(1,), (2,)]) self.assertEqual(query("select $1::integer+$2", (1, 2) ).getresult(), [(3,)]) self.assertEqual(query("select $1::integer+$2", [1, 2] ).getresult(), [(3,)]) self.assertEqual(query("select 0+$1+$2+$3+$4+$5+$6", list(range(6)) ).getresult(), [(15,)]) def testQueryWithStrParams(self): query = self.c.query self.assertEqual(query("select $1||', world!'", ('Hello',) ).getresult(), [('Hello, world!',)]) self.assertEqual(query("select $1||', world!'", ['Hello'] ).getresult(), [('Hello, world!',)]) self.assertEqual(query("select $1||', '||$2||'!'", ('Hello', 'world'), ).getresult(), [('Hello, world!',)]) self.assertEqual(query("select $1::text", ('Hello, world!',) ).getresult(), [('Hello, world!',)]) self.assertEqual(query("select $1::text,$2::text", ('Hello', 'world') ).getresult(), [('Hello', 'world')]) self.assertEqual(query("select $1::text,$2::text", ['Hello', 'world'] ).getresult(), [('Hello', 'world')]) self.assertEqual(query("select $1::text union select $2::text", ('Hello', 'world')).getresult(), [('Hello',), ('world',)]) try: query("select 'wörld'") except (pg.DataError, pg.NotSupportedError): self.skipTest('database does not support utf8') self.assertEqual(query("select $1||', '||$2||'!'", ('Hello', 'w\xc3\xb6rld')).getresult(), [('Hello, w\xc3\xb6rld!',)]) def testQueryWithUnicodeParams(self): query = self.c.query try: query('set client_encoding=utf8') query("select 'wörld'").getresult()[0][0] == 'wörld' except (pg.DataError, pg.NotSupportedError): self.skipTest("database does not support utf8") self.assertEqual(query("select $1||', '||$2||'!'", 
('Hello', u'wörld')).getresult(), [('Hello, wörld!',)]) def testQueryWithUnicodeParamsLatin1(self): query = self.c.query try: query('set client_encoding=latin1') query("select 'wörld'").getresult()[0][0] == 'wörld' except (pg.DataError, pg.NotSupportedError): self.skipTest("database does not support latin1") r = query("select $1||', '||$2||'!'", ('Hello', u'wörld')).getresult() if unicode_strings: self.assertEqual(r, [('Hello, wörld!',)]) else: self.assertEqual(r, [(u'Hello, wörld!'.encode('latin1'),)]) self.assertRaises(UnicodeError, query, "select $1||', '||$2||'!'", ('Hello', u'мир')) query('set client_encoding=iso_8859_1') r = query("select $1||', '||$2||'!'", ('Hello', u'wörld')).getresult() if unicode_strings: self.assertEqual(r, [('Hello, wörld!',)]) else: self.assertEqual(r, [(u'Hello, wörld!'.encode('latin1'),)]) self.assertRaises(UnicodeError, query, "select $1||', '||$2||'!'", ('Hello', u'мир')) query('set client_encoding=sql_ascii') self.assertRaises(UnicodeError, query, "select $1||', '||$2||'!'", ('Hello', u'wörld')) def testQueryWithUnicodeParamsCyrillic(self): query = self.c.query try: query('set client_encoding=iso_8859_5') query("select 'мир'").getresult()[0][0] == 'мир' except (pg.DataError, pg.NotSupportedError): self.skipTest("database does not support cyrillic") self.assertRaises(UnicodeError, query, "select $1||', '||$2||'!'", ('Hello', u'wörld')) r = query("select $1||', '||$2||'!'", ('Hello', u'мир')).getresult() if unicode_strings: self.assertEqual(r, [('Hello, мир!',)]) else: self.assertEqual(r, [(u'Hello, мир!'.encode('cyrillic'),)]) query('set client_encoding=sql_ascii') self.assertRaises(UnicodeError, query, "select $1||', '||$2||'!'", ('Hello', u'мир!')) def testQueryWithMixedParams(self): self.assertEqual(self.c.query("select $1+2,$2||', world!'", (1, 'Hello'),).getresult(), [(3, 'Hello, world!')]) self.assertEqual(self.c.query("select $1::integer,$2::date,$3::text", (4711, None, 'Hello!'),).getresult(), [(4711, None, 'Hello!')]) def testQueryWithDuplicateParams(self): self.assertRaises(pg.ProgrammingError, self.c.query, "select $1+$1", (1,)) self.assertRaises(pg.ProgrammingError, self.c.query, "select $1+$1", (1, 2)) def testQueryWithZeroParams(self): self.assertEqual(self.c.query("select 1+1", [] ).getresult(), [(2,)]) def testQueryWithGarbage(self): garbage = r"'\{}+()-#[]oo324" self.assertEqual(self.c.query("select $1::text AS garbage", (garbage,) ).dictresult(), [{'garbage': garbage}]) class TestPreparedQueries(unittest.TestCase): """Test prepared queries via a basic pg connection.""" def setUp(self): self.c = connect() self.c.query('set client_encoding=utf8') def tearDown(self): self.c.close() def testEmptyPreparedStatement(self): self.c.prepare('', '') self.assertRaises(ValueError, self.c.query_prepared, '') def testInvalidPreparedStatement(self): self.assertRaises(pg.ProgrammingError, self.c.prepare, '', 'bad') def testDuplicatePreparedStatement(self): self.assertIsNone(self.c.prepare('q', 'select 1')) self.assertRaises(pg.ProgrammingError, self.c.prepare, 'q', 'select 2') def testNonExistentPreparedStatement(self): self.assertRaises(pg.OperationalError, self.c.query_prepared, 'does-not-exist') def testUnnamedQueryWithoutParams(self): self.assertIsNone(self.c.prepare('', "select 'anon'")) self.assertEqual(self.c.query_prepared('').getresult(), [('anon',)]) self.assertEqual(self.c.query_prepared('').getresult(), [('anon',)]) def testNamedQueryWithoutParams(self): self.assertIsNone(self.c.prepare('hello', "select 'world'")) 
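        # A minimal sketch of the server-side prepared-statement API covered
        # by this class (the statement name 'add' is made up for the example):
        #
        #     con = connect()
        #     con.prepare('add', "select $1::int + $2::int")  # prepare once
        #     con.query_prepared('add', [17, 25])             # execute many times
        #     con.describe_prepared('add').listfields()       # result field names
        #     con.close()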
        self.assertEqual(self.c.query_prepared('hello').getresult(),
            [('world',)])

    def testMultipleNamedQueriesWithoutParams(self):
        self.assertIsNone(self.c.prepare('query17', "select 17"))
        self.assertIsNone(self.c.prepare('query42', "select 42"))
        self.assertEqual(self.c.query_prepared('query17').getresult(),
            [(17,)])
        self.assertEqual(self.c.query_prepared('query42').getresult(),
            [(42,)])

    def testUnnamedQueryWithParams(self):
        self.assertIsNone(self.c.prepare('', "select $1 || ', ' || $2"))
        self.assertEqual(
            self.c.query_prepared('', ['hello', 'world']).getresult(),
            [('hello, world',)])
        self.assertIsNone(self.c.prepare('', "select 1+ $1 + $2 + $3"))
        self.assertEqual(
            self.c.query_prepared('', [17, -5, 29]).getresult(),
            [(42,)])

    def testMultipleNamedQueriesWithParams(self):
        self.assertIsNone(self.c.prepare('q1', "select $1 || '!'"))
        self.assertIsNone(self.c.prepare('q2', "select $1 || '-' || $2"))
        self.assertEqual(self.c.query_prepared('q1', ['hello']).getresult(),
            [('hello!',)])
        self.assertEqual(self.c.query_prepared('q2', ['he', 'lo']).getresult(),
            [('he-lo',)])

    def testDescribeNonExistentQuery(self):
        self.assertRaises(pg.OperationalError,
            self.c.describe_prepared, 'does-not-exist')

    def testDescribeUnnamedQuery(self):
        self.c.prepare('', "select 1::int, 'a'::char")
        r = self.c.describe_prepared('')
        self.assertEqual(r.listfields(), ('int4', 'bpchar'))

    def testDescribeNamedQuery(self):
        self.c.prepare('myquery', "select 1 as first, 2 as second")
        r = self.c.describe_prepared('myquery')
        self.assertEqual(r.listfields(), ('first', 'second'))

    def testDescribeMultipleNamedQueries(self):
        self.c.prepare('query1', "select 1::int")
        self.c.prepare('query2', "select 1::int, 2::int")
        r = self.c.describe_prepared('query1')
        self.assertEqual(r.listfields(), ('int4',))
        r = self.c.describe_prepared('query2')
        self.assertEqual(r.listfields(), ('int4', 'int4'))


class TestQueryResultTypes(unittest.TestCase):
    """Test proper result types via a basic pg connection."""

    def setUp(self):
        self.c = connect()
        self.c.query('set client_encoding=utf8')
        self.c.query("set datestyle='ISO,YMD'")
        self.c.query("set timezone='UTC'")

    def tearDown(self):
        self.c.close()

    def assert_proper_cast(self, value, pgtype, pytype):
        q = 'select $1::%s' % (pgtype,)
        try:
            r = self.c.query(q, (value,)).getresult()[0][0]
        except pg.ProgrammingError:
            if pgtype in ('json', 'jsonb'):
                self.skipTest('database does not support json')
        self.assertIsInstance(r, pytype)
        if isinstance(value, str):
            if not value or ' ' in value or '{' in value:
                value = '"%s"' % value
        value = '{%s}' % value
        r = self.c.query(q + '[]', (value,)).getresult()[0][0]
        if pgtype.startswith(('date', 'time', 'interval')):
            # arrays of these are casted by the DB wrapper only
            self.assertEqual(r, value)
        else:
            self.assertIsInstance(r, list)
            self.assertEqual(len(r), 1)
            self.assertIsInstance(r[0], pytype)

    def testInt(self):
        self.assert_proper_cast(0, 'int', int)
        self.assert_proper_cast(0, 'smallint', int)
        self.assert_proper_cast(0, 'oid', int)
        self.assert_proper_cast(0, 'cid', int)
        self.assert_proper_cast(0, 'xid', int)

    def testLong(self):
        self.assert_proper_cast(0, 'bigint', long)

    def testFloat(self):
        self.assert_proper_cast(0, 'float', float)
        self.assert_proper_cast(0, 'real', float)
        self.assert_proper_cast(0, 'double', float)
        self.assert_proper_cast(0, 'double precision', float)
        self.assert_proper_cast('infinity', 'float', float)

    def testNumeric(self):
        decimal = pg.get_decimal()
        self.assert_proper_cast(decimal(0), 'numeric', decimal)
        self.assert_proper_cast(decimal(0), 'decimal', decimal)

    def
testMoney(self): decimal = pg.get_decimal() self.assert_proper_cast(decimal('0'), 'money', decimal) def testBool(self): bool_type = bool if pg.get_bool() else str self.assert_proper_cast('f', 'bool', bool_type) def testDate(self): self.assert_proper_cast('1956-01-31', 'date', str) self.assert_proper_cast('10:20:30', 'interval', str) self.assert_proper_cast('08:42:15', 'time', str) self.assert_proper_cast('08:42:15+00', 'timetz', str) self.assert_proper_cast('1956-01-31 08:42:15', 'timestamp', str) self.assert_proper_cast('1956-01-31 08:42:15+00', 'timestamptz', str) def testText(self): self.assert_proper_cast('', 'text', str) self.assert_proper_cast('', 'char', str) self.assert_proper_cast('', 'bpchar', str) self.assert_proper_cast('', 'varchar', str) def testBytea(self): self.assert_proper_cast('', 'bytea', bytes) def testJson(self): self.assert_proper_cast('{}', 'json', dict) class TestQueryIterator(unittest.TestCase): """Test the query operating as an iterator.""" def setUp(self): self.c = connect() def tearDown(self): self.c.close() def testLen(self): r = self.c.query("select generate_series(3,7)") self.assertEqual(len(r), 5) def testGetItem(self): r = self.c.query("select generate_series(7,9)") self.assertEqual(r[0], (7,)) self.assertEqual(r[1], (8,)) self.assertEqual(r[2], (9,)) def testGetItemWithNegativeIndex(self): r = self.c.query("select generate_series(7,9)") self.assertEqual(r[-1], (9,)) self.assertEqual(r[-2], (8,)) self.assertEqual(r[-3], (7,)) def testGetItemOutOfRange(self): r = self.c.query("select generate_series(7,9)") self.assertRaises(IndexError, r.__getitem__, 3) def testIterate(self): r = self.c.query("select generate_series(3,5)") self.assertNotIsInstance(r, (list, tuple)) self.assertIsInstance(r, Iterable) self.assertEqual(list(r), [(3,), (4,), (5,)]) self.assertIsInstance(r[1], tuple) def testIterateTwice(self): r = self.c.query("select generate_series(3,5)") for i in range(2): self.assertEqual(list(r), [(3,), (4,), (5,)]) def testIterateTwoColumns(self): r = self.c.query("select 1,2 union select 3,4") self.assertIsInstance(r, Iterable) self.assertEqual(list(r), [(1, 2), (3, 4)]) def testNext(self): r = self.c.query("select generate_series(7,9)") self.assertEqual(next(r), (7,)) self.assertEqual(next(r), (8,)) self.assertEqual(next(r), (9,)) self.assertRaises(StopIteration, next, r) def testContains(self): r = self.c.query("select generate_series(7,9)") self.assertIn((8,), r) self.assertNotIn((5,), r) def testDictIterate(self): r = self.c.query("select generate_series(3,5) as n").dictiter() self.assertNotIsInstance(r, (list, tuple)) self.assertIsInstance(r, Iterable) r = list(r) self.assertEqual(r, [dict(n=3), dict(n=4), dict(n=5)]) self.assertIsInstance(r[1], dict) def testDictIterateTwoColumns(self): r = self.c.query("select 1 as one, 2 as two" " union select 3 as one, 4 as two").dictiter() self.assertIsInstance(r, Iterable) r = list(r) self.assertEqual(r, [dict(one=1, two=2), dict(one=3, two=4)]) def testDictNext(self): r = self.c.query("select generate_series(7,9) as n").dictiter() self.assertEqual(next(r), dict(n=7)) self.assertEqual(next(r), dict(n=8)) self.assertEqual(next(r), dict(n=9)) self.assertRaises(StopIteration, next, r) def testDictContains(self): r = self.c.query("select generate_series(7,9) as n").dictiter() self.assertIn(dict(n=8), r) self.assertNotIn(dict(n=5), r) def testNamedIterate(self): r = self.c.query("select generate_series(3,5) as number").namediter() self.assertNotIsInstance(r, (list, tuple)) self.assertIsInstance(r, Iterable) r = 
list(r) self.assertEqual(r, [(3,), (4,), (5,)]) self.assertIsInstance(r[1], tuple) self.assertEqual(r[1]._fields, ('number',)) self.assertEqual(r[1].number, 4) def testNamedIterateTwoColumns(self): r = self.c.query("select 1 as one, 2 as two" " union select 3 as one, 4 as two").namediter() self.assertIsInstance(r, Iterable) r = list(r) self.assertEqual(r, [(1, 2), (3, 4)]) self.assertEqual(r[0]._fields, ('one', 'two')) self.assertEqual(r[0].one, 1) self.assertEqual(r[1]._fields, ('one', 'two')) self.assertEqual(r[1].two, 4) def testNamedNext(self): r = self.c.query("select generate_series(7,9) as number").namediter() self.assertEqual(next(r), (7,)) self.assertEqual(next(r), (8,)) n = next(r) self.assertEqual(n._fields, ('number',)) self.assertEqual(n.number, 9) self.assertRaises(StopIteration, next, r) def testNamedContains(self): r = self.c.query("select generate_series(7,9)").namediter() self.assertIn((8,), r) self.assertNotIn((5,), r) def testScalarIterate(self): r = self.c.query("select generate_series(3,5)").scalariter() self.assertNotIsInstance(r, (list, tuple)) self.assertIsInstance(r, Iterable) r = list(r) self.assertEqual(r, [3, 4, 5]) self.assertIsInstance(r[1], int) def testScalarIterateTwoColumns(self): r = self.c.query("select 1, 2 union select 3, 4").scalariter() self.assertIsInstance(r, Iterable) r = list(r) self.assertEqual(r, [1, 3]) def testScalarNext(self): r = self.c.query("select generate_series(7,9)").scalariter() self.assertEqual(next(r), 7) self.assertEqual(next(r), 8) self.assertEqual(next(r), 9) self.assertRaises(StopIteration, next, r) def testScalarContains(self): r = self.c.query("select generate_series(7,9)").scalariter() self.assertIn(8, r) self.assertNotIn(5, r) class TestQueryOneSingleScalar(unittest.TestCase): """Test the query methods for getting single rows and columns.""" def setUp(self): self.c = connect() def tearDown(self): self.c.close() def testOneWithEmptyQuery(self): q = self.c.query("select 0 where false") self.assertIsNone(q.one()) def testOneWithSingleRow(self): q = self.c.query("select 1, 2") r = q.one() self.assertIsInstance(r, tuple) self.assertEqual(r, (1, 2)) self.assertEqual(q.one(), None) def testOneWithTwoRows(self): q = self.c.query("select 1, 2 union select 3, 4") self.assertEqual(q.one(), (1, 2)) self.assertEqual(q.one(), (3, 4)) self.assertEqual(q.one(), None) def testOneDictWithEmptyQuery(self): q = self.c.query("select 0 where false") self.assertIsNone(q.onedict()) def testOneDictWithSingleRow(self): q = self.c.query("select 1 as one, 2 as two") r = q.onedict() self.assertIsInstance(r, dict) self.assertEqual(r, dict(one=1, two=2)) self.assertEqual(q.onedict(), None) def testOneDictWithTwoRows(self): q = self.c.query( "select 1 as one, 2 as two union select 3 as one, 4 as two") self.assertEqual(q.onedict(), dict(one=1, two=2)) self.assertEqual(q.onedict(), dict(one=3, two=4)) self.assertEqual(q.onedict(), None) def testOneNamedWithEmptyQuery(self): q = self.c.query("select 0 where false") self.assertIsNone(q.onenamed()) def testOneNamedWithSingleRow(self): q = self.c.query("select 1 as one, 2 as two") r = q.onenamed() self.assertEqual(r._fields, ('one', 'two')) self.assertEqual(r.one, 1) self.assertEqual(r.two, 2) self.assertEqual(r, (1, 2)) self.assertEqual(q.onenamed(), None) def testOneNamedWithTwoRows(self): q = self.c.query( "select 1 as one, 2 as two union select 3 as one, 4 as two") r = q.onenamed() self.assertEqual(r._fields, ('one', 'two')) self.assertEqual(r.one, 1) self.assertEqual(r.two, 2) self.assertEqual(r, (1, 2)) 
        r = q.onenamed()
        self.assertEqual(r._fields, ('one', 'two'))
        self.assertEqual(r.one, 3)
        self.assertEqual(r.two, 4)
        self.assertEqual(r, (3, 4))
        self.assertEqual(q.onenamed(), None)

    def testOneScalarWithEmptyQuery(self):
        q = self.c.query("select 0 where false")
        self.assertIsNone(q.onescalar())

    def testOneScalarWithSingleRow(self):
        q = self.c.query("select 1, 2")
        r = q.onescalar()
        self.assertIsInstance(r, int)
        self.assertEqual(r, 1)
        self.assertEqual(q.onescalar(), None)

    def testOneScalarWithTwoRows(self):
        q = self.c.query("select 1, 2 union select 3, 4")
        self.assertEqual(q.onescalar(), 1)
        self.assertEqual(q.onescalar(), 3)
        self.assertEqual(q.onescalar(), None)

    def testSingleWithEmptyQuery(self):
        q = self.c.query("select 0 where false")
        try:
            q.single()
        except pg.InvalidResultError as e:
            r = e
        else:
            r = None
        self.assertIsInstance(r, pg.NoResultError)
        self.assertEqual(str(r), 'No result found')

    def testSingleWithSingleRow(self):
        q = self.c.query("select 1, 2")
        r = q.single()
        self.assertIsInstance(r, tuple)
        self.assertEqual(r, (1, 2))
        r = q.single()
        self.assertIsInstance(r, tuple)
        self.assertEqual(r, (1, 2))

    def testSingleWithTwoRows(self):
        q = self.c.query("select 1, 2 union select 3, 4")
        try:
            q.single()
        except pg.InvalidResultError as e:
            r = e
        else:
            r = None
        self.assertIsInstance(r, pg.MultipleResultsError)
        self.assertEqual(str(r), 'Multiple results found')

    def testSingleDictWithEmptyQuery(self):
        q = self.c.query("select 0 where false")
        try:
            q.singledict()
        except pg.InvalidResultError as e:
            r = e
        else:
            r = None
        self.assertIsInstance(r, pg.NoResultError)
        self.assertEqual(str(r), 'No result found')

    def testSingleDictWithSingleRow(self):
        q = self.c.query("select 1 as one, 2 as two")
        r = q.singledict()
        self.assertIsInstance(r, dict)
        self.assertEqual(r, dict(one=1, two=2))
        r = q.singledict()
        self.assertIsInstance(r, dict)
        self.assertEqual(r, dict(one=1, two=2))

    def testSingleDictWithTwoRows(self):
        q = self.c.query("select 1, 2 union select 3, 4")
        try:
            q.singledict()
        except pg.InvalidResultError as e:
            r = e
        else:
            r = None
        self.assertIsInstance(r, pg.MultipleResultsError)
        self.assertEqual(str(r), 'Multiple results found')

    def testSingleNamedWithEmptyQuery(self):
        q = self.c.query("select 0 where false")
        try:
            q.singlenamed()
        except pg.InvalidResultError as e:
            r = e
        else:
            r = None
        self.assertIsInstance(r, pg.NoResultError)
        self.assertEqual(str(r), 'No result found')

    def testSingleNamedWithSingleRow(self):
        q = self.c.query("select 1 as one, 2 as two")
        r = q.singlenamed()
        self.assertEqual(r._fields, ('one', 'two'))
        self.assertEqual(r.one, 1)
        self.assertEqual(r.two, 2)
        self.assertEqual(r, (1, 2))
        r = q.singlenamed()
        self.assertEqual(r._fields, ('one', 'two'))
        self.assertEqual(r.one, 1)
        self.assertEqual(r.two, 2)
        self.assertEqual(r, (1, 2))

    def testSingleNamedWithTwoRows(self):
        q = self.c.query("select 1, 2 union select 3, 4")
        try:
            q.singlenamed()
        except pg.InvalidResultError as e:
            r = e
        else:
            r = None
        self.assertIsInstance(r, pg.MultipleResultsError)
        self.assertEqual(str(r), 'Multiple results found')

    def testSingleScalarWithEmptyQuery(self):
        q = self.c.query("select 0 where false")
        try:
            q.singlescalar()
        except pg.InvalidResultError as e:
            r = e
        else:
            r = None
        self.assertIsInstance(r, pg.NoResultError)
        self.assertEqual(str(r), 'No result found')

    def testSingleScalarWithSingleRow(self):
        q = self.c.query("select 1, 2")
        r = q.singlescalar()
        self.assertIsInstance(r, int)
        self.assertEqual(r, 1)
        r = q.singlescalar()
        self.assertIsInstance(r, int)
        self.assertEqual(r, 1)

    def testSingleScalarWithTwoRows(self):
        q =
self.c.query("select 1, 2 union select 3, 4") try: q.singlescalar() except pg.InvalidResultError as e: r = e else: r = None self.assertIsInstance(r, pg.MultipleResultsError) self.assertEqual(str(r), 'Multiple results found') def testScalarResult(self): q = self.c.query("select 1, 2 union select 3, 4") r = q.scalarresult() self.assertIsInstance(r, list) self.assertEqual(r, [1, 3]) def testScalarIter(self): q = self.c.query("select 1, 2 union select 3, 4") r = q.scalariter() self.assertNotIsInstance(r, (list, tuple)) self.assertIsInstance(r, Iterable) r = list(r) self.assertEqual(r, [1, 3]) class TestInserttable(unittest.TestCase): """Test inserttable method.""" cls_set_up = False @classmethod def setUpClass(cls): c = connect() c.query("drop table if exists test cascade") c.query("create table test (" "i2 smallint, i4 integer, i8 bigint, b boolean, dt date, ti time," "d numeric, f4 real, f8 double precision, m money," "c char(1), v4 varchar(4), c4 char(4), t text)") # Check whether the test database uses SQL_ASCII - this means # that it does not consider encoding when calculating lengths. c.query("set client_encoding=utf8") try: c.query("select 'ä'") except (pg.DataError, pg.NotSupportedError): cls.has_encoding = False else: cls.has_encoding = c.query( "select length('ä') - length('a')").getresult()[0][0] == 0 c.close() cls.cls_set_up = True @classmethod def tearDownClass(cls): c = connect() c.query("drop table test cascade") c.close() def setUp(self): self.assertTrue(self.cls_set_up) self.c = connect() self.c.query("set client_encoding=utf8") self.c.query("set datestyle='ISO,YMD'") self.c.query("set lc_monetary='C'") def tearDown(self): self.c.query("truncate table test") self.c.close() data = [ (-1, -1, long(-1), True, '1492-10-12', '08:30:00', -1.2345, -1.75, -1.875, '-1.25', '-', 'r?', '!u', 'xyz'), (0, 0, long(0), False, '1607-04-14', '09:00:00', 0.0, 0.0, 0.0, '0.0', ' ', '0123', '4567', '890'), (1, 1, long(1), True, '1801-03-04', '03:45:00', 1.23456, 1.75, 1.875, '1.25', 'x', 'bc', 'cdef', 'g'), (2, 2, long(2), False, '1903-12-17', '11:22:00', 2.345678, 2.25, 2.125, '2.75', 'y', 'q', 'ijk', 'mnop\nstux!')] @classmethod def db_len(cls, s, encoding): if cls.has_encoding: s = s if isinstance(s, unicode) else s.decode(encoding) else: s = s.encode(encoding) if isinstance(s, unicode) else s return len(s) def get_back(self, encoding='utf-8'): """Convert boolean and decimal values back.""" data = [] for row in self.c.query("select * from test order by 1").getresult(): self.assertIsInstance(row, tuple) row = list(row) if row[0] is not None: # smallint self.assertIsInstance(row[0], int) if row[1] is not None: # integer self.assertIsInstance(row[1], int) if row[2] is not None: # bigint self.assertIsInstance(row[2], long) if row[3] is not None: # boolean self.assertIsInstance(row[3], bool) if row[4] is not None: # date self.assertIsInstance(row[4], str) self.assertTrue(row[4].replace('-', '').isdigit()) if row[5] is not None: # time self.assertIsInstance(row[5], str) self.assertTrue(row[5].replace(':', '').isdigit()) if row[6] is not None: # numeric self.assertIsInstance(row[6], Decimal) row[6] = float(row[6]) if row[7] is not None: # real self.assertIsInstance(row[7], float) if row[8] is not None: # double precision self.assertIsInstance(row[8], float) row[8] = float(row[8]) if row[9] is not None: # money self.assertIsInstance(row[9], Decimal) row[9] = str(float(row[9])) if row[10] is not None: # char(1) self.assertIsInstance(row[10], str) self.assertEqual(self.db_len(row[10], encoding), 1) if 
row[11] is not None: # varchar(4) self.assertIsInstance(row[11], str) self.assertLessEqual(self.db_len(row[11], encoding), 4) if row[12] is not None: # char(4) self.assertIsInstance(row[12], str) self.assertEqual(self.db_len(row[12], encoding), 4) row[12] = row[12].rstrip() if row[13] is not None: # text self.assertIsInstance(row[13], str) row = tuple(row) data.append(row) return data def testInserttable1Row(self): data = self.data[2:3] self.c.inserttable('test', data) self.assertEqual(self.get_back(), data) def testInserttable4Rows(self): data = self.data self.c.inserttable('test', data) self.assertEqual(self.get_back(), data) def testInserttableFromTupleOfLists(self): data = tuple(list(row) for row in self.data) self.c.inserttable('test', data) self.assertEqual(self.get_back(), self.data) def testInserttableFromSetofTuples(self): data = set(row for row in self.data) try: self.c.inserttable('test', data) except TypeError as e: r = str(e) else: r = 'this is fine' self.assertIn('list or a tuple as second argument', r) def testInserttableFromListOfSets(self): data = [set(row) for row in self.data] try: self.c.inserttable('test', data) except TypeError as e: r = str(e) else: r = 'this is fine' self.assertIn('second argument must contain a tuple or a list', r) def testInserttableMultipleRows(self): num_rows = 100 data = self.data[2:3] * num_rows self.c.inserttable('test', data) r = self.c.query("select count(*) from test").getresult()[0][0] self.assertEqual(r, num_rows) def testInserttableMultipleCalls(self): num_rows = 10 data = self.data[2:3] for _i in range(num_rows): self.c.inserttable('test', data) r = self.c.query("select count(*) from test").getresult()[0][0] self.assertEqual(r, num_rows) def testInserttableNullValues(self): data = [(None,) * 14] * 100 self.c.inserttable('test', data) self.assertEqual(self.get_back(), data) def testInserttableMaxValues(self): data = [(2 ** 15 - 1, int(2 ** 31 - 1), long(2 ** 31 - 1), True, '2999-12-31', '11:59:59', 1e99, 1.0 + 1.0 / 32, 1.0 + 1.0 / 32, None, "1", "1234", "1234", "1234" * 100)] self.c.inserttable('test', data) self.assertEqual(self.get_back(), data) def testInserttableByteValues(self): try: self.c.query("select '€', 'käse', 'сыр', 'pont-l''évêque'") except pg.DataError: self.skipTest("database does not support utf8") # non-ascii chars do not fit in char(1) when there is no encoding c = u'€' if self.has_encoding else u'$' row_unicode = (0, 0, long(0), False, u'1970-01-01', u'00:00:00', 0.0, 0.0, 0.0, u'0.0', c, u'bäd', u'bäd', u"käse сыр pont-l'évêque") row_bytes = tuple(s.encode('utf-8') if isinstance(s, unicode) else s for s in row_unicode) data = [row_bytes] * 2 self.c.inserttable('test', data) if unicode_strings: data = [row_unicode] * 2 self.assertEqual(self.get_back(), data) def testInserttableUnicodeUtf8(self): try: self.c.query("select '€', 'käse', 'сыр', 'pont-l''évêque'") except pg.DataError: self.skipTest("database does not support utf8") # non-ascii chars do not fit in char(1) when there is no encoding c = u'€' if self.has_encoding else u'$' row_unicode = (0, 0, long(0), False, u'1970-01-01', u'00:00:00', 0.0, 0.0, 0.0, u'0.0', c, u'bäd', u'bäd', u"käse сыр pont-l'évêque") data = [row_unicode] * 2 self.c.inserttable('test', data) if not unicode_strings: row_bytes = tuple(s.encode('utf-8') if isinstance(s, unicode) else s for s in row_unicode) data = [row_bytes] * 2 self.assertEqual(self.get_back(), data) def testInserttableUnicodeLatin1(self): try: self.c.query("set client_encoding=latin1") self.c.query("select '¥'") except 
(pg.DataError, pg.NotSupportedError): self.skipTest("database does not support latin1") # non-ascii chars do not fit in char(1) when there is no encoding c = u'€' if self.has_encoding else u'$' row_unicode = (0, 0, long(0), False, u'1970-01-01', u'00:00:00', 0.0, 0.0, 0.0, u'0.0', c, u'bäd', u'bäd', u"for käse and pont-l'évêque pay in €") data = [row_unicode] # cannot encode € sign with latin1 encoding self.assertRaises(UnicodeEncodeError, self.c.inserttable, 'test', data) row_unicode = tuple(s.replace(u'€', u'¥') if isinstance(s, unicode) else s for s in row_unicode) data = [row_unicode] * 2 self.c.inserttable('test', data) if not unicode_strings: row_bytes = tuple(s.encode('latin1') if isinstance(s, unicode) else s for s in row_unicode) data = [row_bytes] * 2 self.assertEqual(self.get_back('latin1'), data) def testInserttableUnicodeLatin9(self): try: self.c.query("set client_encoding=latin9") self.c.query("select '€'") except (pg.DataError, pg.NotSupportedError): self.skipTest("database does not support latin9") return # non-ascii chars do not fit in char(1) when there is no encoding c = u'€' if self.has_encoding else u'$' row_unicode = (0, 0, long(0), False, u'1970-01-01', u'00:00:00', 0.0, 0.0, 0.0, u'0.0', c, u'bäd', u'bäd', u"for käse and pont-l'évêque pay in €") data = [row_unicode] * 2 self.c.inserttable('test', data) if not unicode_strings: row_bytes = tuple(s.encode('latin9') if isinstance(s, unicode) else s for s in row_unicode) data = [row_bytes] * 2 self.assertEqual(self.get_back('latin9'), data) def testInserttableNoEncoding(self): self.c.query("set client_encoding=sql_ascii") # non-ascii chars do not fit in char(1) when there is no encoding c = u'€' if self.has_encoding else u'$' row_unicode = (0, 0, long(0), False, u'1970-01-01', u'00:00:00', 0.0, 0.0, 0.0, u'0.0', c, u'bäd', u'bäd', u"for käse and pont-l'évêque pay in €") data = [row_unicode] # cannot encode non-ascii unicode without a specific encoding self.assertRaises(UnicodeEncodeError, self.c.inserttable, 'test', data) class TestDirectSocketAccess(unittest.TestCase): """Test copy command with direct socket access.""" cls_set_up = False @classmethod def setUpClass(cls): c = connect() c.query("drop table if exists test cascade") c.query("create table test (i int, v varchar(16))") c.close() cls.cls_set_up = True @classmethod def tearDownClass(cls): c = connect() c.query("drop table test cascade") c.close() def setUp(self): self.assertTrue(self.cls_set_up) self.c = connect() self.c.query("set client_encoding=utf8") def tearDown(self): self.c.query("truncate table test") self.c.close() def testPutline(self): putline = self.c.putline query = self.c.query data = list(enumerate("apple pear plum cherry banana".split())) query("copy test from stdin") try: for i, v in data: putline("%d\t%s\n" % (i, v)) putline("\\.\n") finally: self.c.endcopy() r = query("select * from test").getresult() self.assertEqual(r, data) def testPutlineBytesAndUnicode(self): putline = self.c.putline query = self.c.query try: query("select 'käse+würstel'") except (pg.DataError, pg.NotSupportedError): self.skipTest('database does not support utf8') query("copy test from stdin") try: putline(u"47\tkäse\n".encode('utf8')) putline("35\twürstel\n") putline(b"\\.\n") finally: self.c.endcopy() r = query("select * from test").getresult() self.assertEqual(r, [(47, 'käse'), (35, 'würstel')]) def testGetline(self): getline = self.c.getline query = self.c.query data = list(enumerate("apple banana pear plum strawberry".split())) n = len(data) 
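        # A minimal sketch of the raw COPY protocol driven in this class (the
        # temp table name copy_demo is made up for the example):
        #
        #     con = connect()
        #     con.query("create temp table copy_demo (i int, v varchar(16))")
        #     con.query("copy copy_demo from stdin")
        #     try:
        #         con.putline("1\tapple\n")  # one row per line, tab-separated
        #         con.putline("\\.\n")       # end-of-data marker
        #     finally:
        #         con.endcopy()              # always leave copy mode again
        #     con.close()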
self.c.inserttable('test', data) query("copy test to stdout") try: for i in range(n + 2): v = getline() if i < n: self.assertEqual(v, '%d\t%s' % data[i]) elif i == n: self.assertEqual(v, '\\.') else: self.assertIsNone(v) finally: try: self.c.endcopy() except IOError: pass def testGetlineBytesAndUnicode(self): getline = self.c.getline query = self.c.query try: query("select 'käse+würstel'") except (pg.DataError, pg.NotSupportedError): self.skipTest('database does not support utf8') data = [(54, u'käse'.encode('utf8')), (73, u'würstel')] self.c.inserttable('test', data) query("copy test to stdout") try: v = getline() self.assertIsInstance(v, str) self.assertEqual(v, '54\tkäse') v = getline() self.assertIsInstance(v, str) self.assertEqual(v, '73\twürstel') self.assertEqual(getline(), '\\.') self.assertIsNone(getline()) finally: try: self.c.endcopy() except IOError: pass def testParameterChecks(self): self.assertRaises(TypeError, self.c.putline) self.assertRaises(TypeError, self.c.getline, 'invalid') self.assertRaises(TypeError, self.c.endcopy, 'invalid') class TestNotificatons(unittest.TestCase): """Test notification support.""" def setUp(self): self.c = connect() def tearDown(self): self.doCleanups() self.c.close() def testGetNotify(self): getnotify = self.c.getnotify query = self.c.query self.assertIsNone(getnotify()) query('listen test_notify') try: self.assertIsNone(self.c.getnotify()) query("notify test_notify") r = getnotify() self.assertIsInstance(r, tuple) self.assertEqual(len(r), 3) self.assertIsInstance(r[0], str) self.assertIsInstance(r[1], int) self.assertIsInstance(r[2], str) self.assertEqual(r[0], 'test_notify') self.assertEqual(r[2], '') self.assertIsNone(self.c.getnotify()) query("notify test_notify, 'test_payload'") r = getnotify() self.assertTrue(isinstance(r, tuple)) self.assertEqual(len(r), 3) self.assertIsInstance(r[0], str) self.assertIsInstance(r[1], int) self.assertIsInstance(r[2], str) self.assertEqual(r[0], 'test_notify') self.assertEqual(r[2], 'test_payload') self.assertIsNone(getnotify()) finally: query('unlisten test_notify') def testGetNoticeReceiver(self): self.assertIsNone(self.c.get_notice_receiver()) def testSetNoticeReceiver(self): self.assertRaises(TypeError, self.c.set_notice_receiver, 42) self.assertRaises(TypeError, self.c.set_notice_receiver, 'invalid') self.assertIsNone(self.c.set_notice_receiver(lambda notice: None)) self.assertIsNone(self.c.set_notice_receiver(None)) def testSetAndGetNoticeReceiver(self): r = lambda notice: None self.assertIsNone(self.c.set_notice_receiver(r)) self.assertIs(self.c.get_notice_receiver(), r) self.assertIsNone(self.c.set_notice_receiver(None)) self.assertIsNone(self.c.get_notice_receiver()) def testNoticeReceiver(self): self.addCleanup(self.c.query, 'drop function bilbo_notice();') self.c.query('''create function bilbo_notice() returns void AS $$ begin raise warning 'Bilbo was here!'; end; $$ language plpgsql''') received = {} def notice_receiver(notice): for attr in dir(notice): if attr.startswith('__'): continue value = getattr(notice, attr) if isinstance(value, str): value = value.replace('WARNUNG', 'WARNING') received[attr] = value self.c.set_notice_receiver(notice_receiver) self.c.query('select bilbo_notice()') self.assertEqual(received, dict( pgcnx=self.c, message='WARNING: Bilbo was here!\n', severity='WARNING', primary='Bilbo was here!', detail=None, hint=None)) class TestConfigFunctions(unittest.TestCase): """Test the functions for changing default settings. 
To test the effect of most of these functions, we need a database connection. That's why they are covered in this test module. """ def setUp(self): self.c = connect() self.c.query("set client_encoding=utf8") self.c.query('set bytea_output=hex') self.c.query("set lc_monetary='C'") def tearDown(self): self.c.close() def testGetDecimalPoint(self): point = pg.get_decimal_point() # error if a parameter is passed self.assertRaises(TypeError, pg.get_decimal_point, point) self.assertIsInstance(point, str) self.assertEqual(point, '.') # the default setting pg.set_decimal_point(',') try: r = pg.get_decimal_point() finally: pg.set_decimal_point(point) self.assertIsInstance(r, str) self.assertEqual(r, ',') pg.set_decimal_point("'") try: r = pg.get_decimal_point() finally: pg.set_decimal_point(point) self.assertIsInstance(r, str) self.assertEqual(r, "'") pg.set_decimal_point('') try: r = pg.get_decimal_point() finally: pg.set_decimal_point(point) self.assertIsNone(r) pg.set_decimal_point(None) try: r = pg.get_decimal_point() finally: pg.set_decimal_point(point) self.assertIsNone(r) def testSetDecimalPoint(self): d = pg.Decimal point = pg.get_decimal_point() self.assertRaises(TypeError, pg.set_decimal_point) # error if decimal point is not a string self.assertRaises(TypeError, pg.set_decimal_point, 0) # error if more than one decimal point passed self.assertRaises(TypeError, pg.set_decimal_point, '.', ',') self.assertRaises(TypeError, pg.set_decimal_point, '.,') # error if decimal point is not a punctuation character self.assertRaises(TypeError, pg.set_decimal_point, '0') query = self.c.query # check that money values are interpreted as decimal values # only if decimal_point is set, and that the result is correct # only if it is set suitable for the current lc_monetary setting select_money = "select '34.25'::money" proper_money = d('34.25') bad_money = d('3425') en_locales = 'en', 'en_US', 'en_US.utf8', 'en_US.UTF-8' en_money = '$34.25', '$ 34.25', '34.25$', '34.25 $', '34.25 Dollar' de_locales = 'de', 'de_DE', 'de_DE.utf8', 'de_DE.UTF-8' de_money = ('34,25€', '34,25 €', '€34,25', '€ 34,25', 'EUR34,25', 'EUR 34,25', '34,25 EUR', '34,25 Euro', '34,25 DM') # first try with English localization (using the point) for lc in en_locales: try: query("set lc_monetary='%s'" % lc) except pg.DataError: pass else: break else: self.skipTest("cannot set English money locale") try: query(select_money) except (pg.DataError, pg.ProgrammingError): # this can happen if the currency signs cannot be # converted using the encoding of the test database self.skipTest("database does not support English money") pg.set_decimal_point(None) try: r = query(select_money).getresult()[0][0] finally: pg.set_decimal_point(point) self.assertIsInstance(r, str) self.assertIn(r, en_money) pg.set_decimal_point('') try: r = query(select_money).getresult()[0][0] finally: pg.set_decimal_point(point) self.assertIsInstance(r, str) self.assertIn(r, en_money) pg.set_decimal_point('.') try: r = query(select_money).getresult()[0][0] finally: pg.set_decimal_point(point) self.assertIsInstance(r, d) self.assertEqual(r, proper_money) pg.set_decimal_point(',') try: r = query(select_money).getresult()[0][0] finally: pg.set_decimal_point(point) self.assertIsInstance(r, d) self.assertEqual(r, bad_money) pg.set_decimal_point("'") try: r = query(select_money).getresult()[0][0] finally: pg.set_decimal_point(point) self.assertIsInstance(r, d) self.assertEqual(r, bad_money) # then try with German localization (using the comma) for lc in de_locales: try: query("set 
lc_monetary='%s'" % lc) except pg.DataError: pass else: break else: self.skipTest("cannot set German money locale") select_money = select_money.replace('.', ',') try: query(select_money) except (pg.DataError, pg.ProgrammingError): self.skipTest("database does not support German money") pg.set_decimal_point(None) try: r = query(select_money).getresult()[0][0] finally: pg.set_decimal_point(point) self.assertIsInstance(r, str) self.assertIn(r, de_money) pg.set_decimal_point('') try: r = query(select_money).getresult()[0][0] finally: pg.set_decimal_point(point) self.assertIsInstance(r, str) self.assertIn(r, de_money) pg.set_decimal_point(',') try: r = query(select_money).getresult()[0][0] finally: pg.set_decimal_point(point) self.assertIsInstance(r, d) self.assertEqual(r, proper_money) pg.set_decimal_point('.') try: r = query(select_money).getresult()[0][0] finally: pg.set_decimal_point(point) self.assertEqual(r, bad_money) pg.set_decimal_point("'") try: r = query(select_money).getresult()[0][0] finally: pg.set_decimal_point(point) self.assertEqual(r, bad_money) def testGetDecimal(self): decimal_class = pg.get_decimal() # error if a parameter is passed self.assertRaises(TypeError, pg.get_decimal, decimal_class) self.assertIs(decimal_class, pg.Decimal) # the default setting pg.set_decimal(int) try: r = pg.get_decimal() finally: pg.set_decimal(decimal_class) self.assertIs(r, int) r = pg.get_decimal() self.assertIs(r, decimal_class) def testSetDecimal(self): decimal_class = pg.get_decimal() # error if no parameter is passed self.assertRaises(TypeError, pg.set_decimal) query = self.c.query try: r = query("select 3425::numeric") except pg.DatabaseError: self.skipTest('database does not support numeric') r = r.getresult()[0][0] self.assertIsInstance(r, decimal_class) self.assertEqual(r, decimal_class('3425')) r = query("select 3425::numeric") pg.set_decimal(int) try: r = r.getresult()[0][0] finally: pg.set_decimal(decimal_class) self.assertNotIsInstance(r, decimal_class) self.assertIsInstance(r, int) self.assertEqual(r, int(3425)) def testGetBool(self): use_bool = pg.get_bool() # error if a parameter is passed self.assertRaises(TypeError, pg.get_bool, use_bool) self.assertIsInstance(use_bool, bool) self.assertIs(use_bool, True) # the default setting pg.set_bool(False) try: r = pg.get_bool() finally: pg.set_bool(use_bool) self.assertIsInstance(r, bool) self.assertIs(r, False) pg.set_bool(True) try: r = pg.get_bool() finally: pg.set_bool(use_bool) self.assertIsInstance(r, bool) self.assertIs(r, True) pg.set_bool(0) try: r = pg.get_bool() finally: pg.set_bool(use_bool) self.assertIsInstance(r, bool) self.assertIs(r, False) pg.set_bool(1) try: r = pg.get_bool() finally: pg.set_bool(use_bool) self.assertIsInstance(r, bool) self.assertIs(r, True) def testSetBool(self): use_bool = pg.get_bool() # error if no parameter is passed self.assertRaises(TypeError, pg.set_bool) query = self.c.query try: r = query("select true::bool") except pg.ProgrammingError: self.skipTest('database does not support bool') r = r.getresult()[0][0] self.assertIsInstance(r, bool) self.assertEqual(r, True) pg.set_bool(False) try: r = query("select true::bool").getresult()[0][0] finally: pg.set_bool(use_bool) self.assertIsInstance(r, str) self.assertIs(r, 't') pg.set_bool(True) try: r = query("select true::bool").getresult()[0][0] finally: pg.set_bool(use_bool) self.assertIsInstance(r, bool) self.assertIs(r, True) def testGetByteEscaped(self): bytea_escaped = pg.get_bytea_escaped() # error if a parameter is passed 
self.assertRaises(TypeError, pg.get_bytea_escaped, bytea_escaped) self.assertIsInstance(bytea_escaped, bool) self.assertIs(bytea_escaped, False) # the default setting pg.set_bytea_escaped(True) try: r = pg.get_bytea_escaped() finally: pg.set_bytea_escaped(bytea_escaped) self.assertIsInstance(r, bool) self.assertIs(r, True) pg.set_bytea_escaped(False) try: r = pg.get_bytea_escaped() finally: pg.set_bytea_escaped(bytea_escaped) self.assertIsInstance(r, bool) self.assertIs(r, False) pg.set_bytea_escaped(1) try: r = pg.get_bytea_escaped() finally: pg.set_bytea_escaped(bytea_escaped) self.assertIsInstance(r, bool) self.assertIs(r, True) pg.set_bytea_escaped(0) try: r = pg.get_bytea_escaped() finally: pg.set_bytea_escaped(bytea_escaped) self.assertIsInstance(r, bool) self.assertIs(r, False) def testSetByteaEscaped(self): bytea_escaped = pg.get_bytea_escaped() # error if no parameter is passed self.assertRaises(TypeError, pg.set_bytea_escaped) query = self.c.query try: r = query("select 'data'::bytea") except pg.ProgrammingError: self.skipTest('database does not support bytea') r = r.getresult()[0][0] self.assertIsInstance(r, bytes) self.assertEqual(r, b'data') pg.set_bytea_escaped(True) try: r = query("select 'data'::bytea").getresult()[0][0] finally: pg.set_bytea_escaped(bytea_escaped) self.assertIsInstance(r, str) self.assertEqual(r, '\\x64617461') pg.set_bytea_escaped(False) try: r = query("select 'data'::bytea").getresult()[0][0] finally: pg.set_bytea_escaped(bytea_escaped) self.assertIsInstance(r, bytes) self.assertEqual(r, b'data') def testSetRowFactorySize(self): try: from functools import lru_cache except ImportError: # Python < 3.2 lru_cache = None queries = ['select 1 as a, 2 as b, 3 as c', 'select 123 as abc'] query = self.c.query for maxsize in (None, 0, 1, 2, 3, 10, 1024): pg.set_row_factory_size(maxsize) for i in range(3): for q in queries: r = query(q).namedresult()[0] if q.endswith('abc'): self.assertEqual(r, (123,)) self.assertEqual(r._fields, ('abc',)) else: self.assertEqual(r, (1, 2, 3)) self.assertEqual(r._fields, ('a', 'b', 'c')) if lru_cache: info = pg._row_factory.cache_info() self.assertEqual(info.maxsize, maxsize) self.assertEqual(info.hits + info.misses, 6) self.assertEqual(info.hits, 0 if maxsize is not None and maxsize < 2 else 4) class TestStandaloneEscapeFunctions(unittest.TestCase): """Test pg escape functions. The libpq interface memorizes some parameters of the last opened connection that influence the result of these functions. Therefore we need to open a connection with fixed parameters prior to testing in order to ensure that the tests always run under the same conditions. That's why these tests are included in this test module. 
""" cls_set_up = False @classmethod def setUpClass(cls): db = connect() query = db.query query('set client_encoding=sql_ascii') query('set standard_conforming_strings=off') try: query('set bytea_output=escape') except pg.ProgrammingError: if db.server_version >= 90000: raise # ignore for older server versions db.close() cls.cls_set_up = True def testEscapeString(self): self.assertTrue(self.cls_set_up) f = pg.escape_string r = f(b'plain') self.assertIsInstance(r, bytes) self.assertEqual(r, b'plain') r = f(u'plain') self.assertIsInstance(r, unicode) self.assertEqual(r, u'plain') r = f(u"das is' käse".encode('utf-8')) self.assertIsInstance(r, bytes) self.assertEqual(r, u"das is'' käse".encode('utf-8')) r = f(u"that's cheesy") self.assertIsInstance(r, unicode) self.assertEqual(r, u"that''s cheesy") r = f(r"It's bad to have a \ inside.") self.assertEqual(r, r"It''s bad to have a \\ inside.") def testEscapeBytea(self): self.assertTrue(self.cls_set_up) f = pg.escape_bytea r = f(b'plain') self.assertIsInstance(r, bytes) self.assertEqual(r, b'plain') r = f(u'plain') self.assertIsInstance(r, unicode) self.assertEqual(r, u'plain') r = f(u"das is' käse".encode('utf-8')) self.assertIsInstance(r, bytes) self.assertEqual(r, b"das is'' k\\\\303\\\\244se") r = f(u"that's cheesy") self.assertIsInstance(r, unicode) self.assertEqual(r, u"that''s cheesy") r = f(b'O\x00ps\xff!') self.assertEqual(r, b'O\\\\000ps\\\\377!') if __name__ == '__main__': unittest.main() pygresql-5.1.2/tests/test_classic_dbwrapper.py000077500000000000000000005714631365010227600216540ustar00rootroot00000000000000#!/usr/bin/python # -*- coding: utf-8 -*- """Test the classic PyGreSQL interface. Sub-tests for the DB wrapper object. Contributed by Christoph Zwerschke. These tests need a database to test against. """ try: import unittest2 as unittest # for Python < 2.7 except ImportError: import unittest import os import sys import gc import json import tempfile import pg # the module under test from decimal import Decimal from datetime import date, time, datetime, timedelta from uuid import UUID from time import strftime from operator import itemgetter # We need a database to test against. If LOCAL_PyGreSQL.py exists we will # get our information from that. Otherwise we use the defaults. # The current user must have create schema privilege on the database. 
dbname = 'unittest' dbhost = None dbport = 5432 debug = False # let DB wrapper print debugging output try: from .LOCAL_PyGreSQL import * except (ImportError, ValueError): try: from LOCAL_PyGreSQL import * except ImportError: pass try: # noinspection PyUnresolvedReferences long except NameError: # Python >= 3.0 long = int try: # noinspection PyUnresolvedReferences unicode except NameError: # Python >= 3.0 unicode = str try: from collections import OrderedDict except ImportError: # Python 2.6 or 3.0 OrderedDict = dict if str is bytes: # noinspection PyUnresolvedReferences from StringIO import StringIO else: from io import StringIO windows = os.name == 'nt' # There is a known a bug in libpq under Windows which can cause # the interface to crash when calling PQhost(): do_not_ask_for_host = windows do_not_ask_for_host_reason = 'libpq issue on Windows' def DB(): """Create a DB wrapper object connecting to the test database.""" db = pg.DB(dbname, dbhost, dbport) if debug: db.debug = debug db.query("set client_min_messages=warning") return db class TestAttrDict(unittest.TestCase): """Test the simple ordered dictionary for attribute names.""" cls = pg.AttrDict base = OrderedDict def testInit(self): a = self.cls() self.assertIsInstance(a, self.base) self.assertEqual(a, self.base()) items = [('id', 'int'), ('name', 'text')] a = self.cls(items) self.assertIsInstance(a, self.base) self.assertEqual(a, self.base(items)) iteritems = iter(items) a = self.cls(iteritems) self.assertIsInstance(a, self.base) self.assertEqual(a, self.base(items)) def testIter(self): a = self.cls() self.assertEqual(list(a), []) keys = ['id', 'name', 'age'] items = [(key, None) for key in keys] a = self.cls(items) self.assertEqual(list(a), keys) def testKeys(self): a = self.cls() self.assertEqual(list(a.keys()), []) keys = ['id', 'name', 'age'] items = [(key, None) for key in keys] a = self.cls(items) self.assertEqual(list(a.keys()), keys) def testValues(self): a = self.cls() self.assertEqual(list(a.values()), []) items = [('id', 'int'), ('name', 'text')] values = [item[1] for item in items] a = self.cls(items) self.assertEqual(list(a.values()), values) def testItems(self): a = self.cls() self.assertEqual(list(a.items()), []) items = [('id', 'int'), ('name', 'text')] a = self.cls(items) self.assertEqual(list(a.items()), items) def testGet(self): a = self.cls([('id', 1)]) try: self.assertEqual(a['id'], 1) except KeyError: self.fail('AttrDict should be readable') def testSet(self): a = self.cls() try: a['id'] = 1 except TypeError: pass else: self.fail('AttrDict should be read-only') def testDel(self): a = self.cls([('id', 1)]) try: del a['id'] except TypeError: pass else: self.fail('AttrDict should be read-only') def testWriteMethods(self): a = self.cls([('id', 1)]) self.assertEqual(a['id'], 1) for method in 'clear', 'update', 'pop', 'setdefault', 'popitem': method = getattr(a, method) self.assertRaises(TypeError, method, a) class TestDBClassInit(unittest.TestCase): """Test proper handling of errors when creating DB instances.""" def testBadParams(self): self.assertRaises(TypeError, pg.DB, invalid=True) def testDeleteDb(self): db = DB() del db.db self.assertRaises(pg.InternalError, db.close) del db class TestDBClassBasic(unittest.TestCase): """Test existence of the DB class wrapped pg connection methods.""" def setUp(self): self.db = DB() def tearDown(self): try: self.db.close() except pg.InternalError: pass def testAllDBAttributes(self): attributes = [ 'abort', 'adapter', 'backend_pid', 'begin', 'cancel', 'clear', 'close', 
'commit', 'date_format', 'db', 'dbname', 'dbtypes', 'debug', 'decode_json', 'delete', 'delete_prepared', 'describe_prepared', 'encode_json', 'end', 'endcopy', 'error', 'escape_bytea', 'escape_identifier', 'escape_literal', 'escape_string', 'fileno', 'get', 'get_as_dict', 'get_as_list', 'get_attnames', 'get_cast_hook', 'get_databases', 'get_notice_receiver', 'get_parameter', 'get_relations', 'get_tables', 'getline', 'getlo', 'getnotify', 'has_table_privilege', 'host', 'insert', 'inserttable', 'locreate', 'loimport', 'notification_handler', 'options', 'parameter', 'pkey', 'port', 'prepare', 'protocol_version', 'putline', 'query', 'query_formatted', 'query_prepared', 'release', 'reopen', 'reset', 'rollback', 'savepoint', 'server_version', 'set_cast_hook', 'set_notice_receiver', 'set_parameter', 'socket', 'source', 'ssl_attributes', 'ssl_in_use', 'start', 'status', 'transaction', 'truncate', 'unescape_bytea', 'update', 'upsert', 'use_regtypes', 'user', ] # __dir__ is not called in Python 2.6 for old-style classes db_attributes = dir(self.db) if hasattr( self.db.__class__, '__class__') else self.db.__dir__() db_attributes = [a for a in db_attributes if not a.startswith('_')] self.assertEqual(attributes, db_attributes) def testAttributeDb(self): self.assertEqual(self.db.db.db, dbname) def testAttributeDbname(self): self.assertEqual(self.db.dbname, dbname) def testAttributeError(self): error = self.db.error self.assertTrue(not error or 'krb5_' in error) self.assertEqual(self.db.error, self.db.db.error) @unittest.skipIf(do_not_ask_for_host, do_not_ask_for_host_reason) def testAttributeHost(self): if dbhost and not dbhost.startswith('/'): host = dbhost else: host = 'localhost' self.assertIsInstance(self.db.host, str) self.assertEqual(self.db.host, host) self.assertEqual(self.db.db.host, host) def testAttributeOptions(self): no_options = '' options = self.db.options self.assertEqual(options, no_options) self.assertEqual(options, self.db.db.options) def testAttributePort(self): def_port = 5432 port = self.db.port self.assertIsInstance(port, int) self.assertEqual(port, dbport or def_port) self.assertEqual(port, self.db.db.port) def testAttributeProtocolVersion(self): protocol_version = self.db.protocol_version self.assertIsInstance(protocol_version, int) self.assertTrue(2 <= protocol_version < 4) self.assertEqual(protocol_version, self.db.db.protocol_version) def testAttributeServerVersion(self): server_version = self.db.server_version self.assertIsInstance(server_version, int) self.assertTrue(90000 <= server_version < 130000) self.assertEqual(server_version, self.db.db.server_version) def testAttributeSocket(self): socket = self.db.socket self.assertIsInstance(socket, int) self.assertGreaterEqual(socket, 0) def testAttributeBackendPid(self): backend_pid = self.db.backend_pid self.assertIsInstance(backend_pid, int) self.assertGreaterEqual(backend_pid, 1) def testAttributeSslInUse(self): ssl_in_use = self.db.ssl_in_use self.assertIsInstance(ssl_in_use, bool) self.assertFalse(ssl_in_use) def testAttributeSslAttributes(self): ssl_attributes = self.db.ssl_attributes self.assertIsInstance(ssl_attributes, dict) self.assertEqual(ssl_attributes, { 'cipher': None, 'compression': None, 'key_bits': None, 'library': None, 'protocol': None}) def testAttributeStatus(self): status_ok = 1 status = self.db.status self.assertIsInstance(status, int) self.assertEqual(status, status_ok) self.assertEqual(status, self.db.db.status) def testAttributeUser(self): no_user = 'Deprecated facility' user = self.db.user 
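        # Note on the version checks above: server_version holds the same
        # integer that PostgreSQL reports as server_version_num, i.e.
        # major * 10000 + minor * 100 + micro before PostgreSQL 10 and
        # major * 10000 + minor from PostgreSQL 10 on, so 90603 stands
        # for version 9.6.3 and 120002 for version 12.2.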
self.assertTrue(user) self.assertIsInstance(user, str) self.assertNotEqual(user, no_user) self.assertEqual(user, self.db.db.user) def testMethodEscapeLiteral(self): self.assertEqual(self.db.escape_literal(''), "''") def testMethodEscapeIdentifier(self): self.assertEqual(self.db.escape_identifier(''), '""') def testMethodEscapeString(self): self.assertEqual(self.db.escape_string(''), '') def testMethodEscapeBytea(self): self.assertEqual(self.db.escape_bytea('').replace( '\\x', '').replace('\\', ''), '') def testMethodUnescapeBytea(self): self.assertEqual(self.db.unescape_bytea(''), b'') def testMethodDecodeJson(self): self.assertEqual(self.db.decode_json('{}'), {}) def testMethodEncodeJson(self): self.assertEqual(self.db.encode_json({}), '{}') def testMethodQuery(self): query = self.db.query query("select 1+1") query("select 1+$1+$2", 2, 3) query("select 1+$1+$2", (2, 3)) query("select 1+$1+$2", [2, 3]) query("select 1+$1", 1) def testMethodQueryEmpty(self): self.assertRaises(ValueError, self.db.query, '') def testMethodQueryDataError(self): try: self.db.query("select 1/0") except pg.DataError as error: self.assertEqual(error.sqlstate, '22012') def testMethodEndcopy(self): try: self.db.endcopy() except IOError: pass def testMethodClose(self): self.db.close() try: self.db.reset() except pg.Error: pass else: self.fail('Reset should give an error for a closed connection') self.assertIsNone(self.db.db) self.assertRaises(pg.InternalError, self.db.close) self.assertRaises(pg.InternalError, self.db.query, 'select 1') self.assertRaises(pg.InternalError, getattr, self.db, 'status') self.assertRaises(pg.InternalError, getattr, self.db, 'error') self.assertRaises(pg.InternalError, getattr, self.db, 'absent') def testMethodReset(self): con = self.db.db self.db.reset() self.assertIs(self.db.db, con) self.db.query("select 1+1") self.db.close() self.assertRaises(pg.InternalError, self.db.reset) def testMethodReopen(self): con = self.db.db self.db.reopen() self.assertIsNot(self.db.db, con) con = self.db.db self.db.query("select 1+1") self.db.close() self.db.reopen() self.assertIsNot(self.db.db, con) self.db.query("select 1+1") self.db.close() def testExistingConnection(self): db = pg.DB(self.db.db) self.assertIsNotNone(db.db) self.assertEqual(self.db.db, db.db) db.close() self.assertIsNone(db.db) self.assertIsNotNone(self.db.db) db.reopen() self.assertIsNotNone(db.db) self.assertEqual(self.db.db, db.db) db.close() self.assertIsNone(db.db) db = pg.DB(self.db) self.assertEqual(self.db.db, db.db) db = pg.DB(db=self.db.db) self.assertEqual(self.db.db, db.db) def testExistingDbApi2Connection(self): class DBApi2Con: def __init__(self, cnx): self._cnx = cnx def close(self): self._cnx.close() db2 = DBApi2Con(self.db.db) db = pg.DB(db2) self.assertEqual(self.db.db, db.db) db.close() self.assertIsNone(db.db) db.reopen() self.assertIsNotNone(db.db) self.assertEqual(self.db.db, db.db) db.close() self.assertIsNone(db.db) db2.close() class TestDBClass(unittest.TestCase): """Test the methods of the DB class wrapped pg connection.""" maxDiff = 80 * 20 cls_set_up = False regtypes = None @classmethod def setUpClass(cls): db = DB() cls.oids = db.server_version < 120000 db.query("drop table if exists test cascade") db.query("create table test (" "i2 smallint, i4 integer, i8 bigint," " d numeric, f4 real, f8 double precision, m money," " v4 varchar(4), c4 char(4), t text)") db.query("create or replace view test_view as" " select i4, v4 from test") db.close() cls.cls_set_up = True @classmethod def tearDownClass(cls): db = DB() 
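        # Note: the cls.oids flag set in setUpClass above exists because
        # "create table ... with oids" was removed in PostgreSQL 12, so
        # the oid-related tests below are skipped on servers >= 120000.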
db.query("drop table test cascade") db.close() def setUp(self): self.assertTrue(self.cls_set_up) self.db = DB() if self.regtypes is None: self.regtypes = self.db.use_regtypes() else: self.db.use_regtypes(self.regtypes) query = self.db.query query('set client_encoding=utf8') query("set lc_monetary='C'") query("set datestyle='ISO,YMD'") query('set standard_conforming_strings=on') try: query('set bytea_output=hex') except pg.ProgrammingError: if self.db.server_version >= 90000: raise # ignore for older server versions def tearDown(self): self.doCleanups() self.db.close() def createTable(self, table, definition, temporary=True, oids=None, values=None): query = self.db.query if '"' not in table or '.' in table: table = '"%s"' % table if not temporary: q = 'drop table if exists %s cascade' % table query(q) self.addCleanup(query, q) temporary = 'temporary table' if temporary else 'table' as_query = definition.startswith(('as ', 'AS ')) if not as_query and not definition.startswith('('): definition = '(%s)' % definition with_oids = 'with oids' if oids else ( 'without oids' if self.oids else '') q = ['create', temporary, table] if as_query: q.extend([with_oids, definition]) else: q.extend([definition, with_oids]) q = ' '.join(q) query(q) if values: for params in values: if not isinstance(params, (list, tuple)): params = [params] values = ', '.join('$%d' % (n + 1) for n in range(len(params))) q = "insert into %s values (%s)" % (table, values) query(q, params) def testClassName(self): self.assertEqual(self.db.__class__.__name__, 'DB') def testModuleName(self): self.assertEqual(self.db.__module__, 'pg') self.assertEqual(self.db.__class__.__module__, 'pg') def testEscapeLiteral(self): f = self.db.escape_literal r = f(b"plain") self.assertIsInstance(r, bytes) self.assertEqual(r, b"'plain'") r = f(u"plain") self.assertIsInstance(r, unicode) self.assertEqual(r, u"'plain'") r = f(u"that's käse".encode('utf-8')) self.assertIsInstance(r, bytes) self.assertEqual(r, u"'that''s käse'".encode('utf-8')) r = f(u"that's käse") self.assertIsInstance(r, unicode) self.assertEqual(r, u"'that''s käse'") self.assertEqual(f(r"It's fine to have a \ inside."), r" E'It''s fine to have a \\ inside.'") self.assertEqual(f('No "quotes" must be escaped.'), "'No \"quotes\" must be escaped.'") def testEscapeIdentifier(self): f = self.db.escape_identifier r = f(b"plain") self.assertIsInstance(r, bytes) self.assertEqual(r, b'"plain"') r = f(u"plain") self.assertIsInstance(r, unicode) self.assertEqual(r, u'"plain"') r = f(u"that's käse".encode('utf-8')) self.assertIsInstance(r, bytes) self.assertEqual(r, u'"that\'s käse"'.encode('utf-8')) r = f(u"that's käse") self.assertIsInstance(r, unicode) self.assertEqual(r, u'"that\'s käse"') self.assertEqual(f(r"It's fine to have a \ inside."), '"It\'s fine to have a \\ inside."') self.assertEqual(f('All "quotes" must be escaped.'), '"All ""quotes"" must be escaped."') def testEscapeString(self): f = self.db.escape_string r = f(b"plain") self.assertIsInstance(r, bytes) self.assertEqual(r, b"plain") r = f(u"plain") self.assertIsInstance(r, unicode) self.assertEqual(r, u"plain") r = f(u"that's käse".encode('utf-8')) self.assertIsInstance(r, bytes) self.assertEqual(r, u"that''s käse".encode('utf-8')) r = f(u"that's käse") self.assertIsInstance(r, unicode) self.assertEqual(r, u"that''s käse") self.assertEqual(f(r"It's fine to have a \ inside."), r"It''s fine to have a \ inside.") def testEscapeBytea(self): f = self.db.escape_bytea # note that escape_byte always returns hex output since Pg 9.0, # 
regardless of the bytea_output setting r = f(b'plain') self.assertIsInstance(r, bytes) self.assertEqual(r, b'\\x706c61696e') r = f(u'plain') self.assertIsInstance(r, unicode) self.assertEqual(r, u'\\x706c61696e') r = f(u"das is' käse".encode('utf-8')) self.assertIsInstance(r, bytes) self.assertEqual(r, b'\\x64617320697327206bc3a47365') r = f(u"das is' käse") self.assertIsInstance(r, unicode) self.assertEqual(r, u'\\x64617320697327206bc3a47365') self.assertEqual(f(b'O\x00ps\xff!'), b'\\x4f007073ff21') def testUnescapeBytea(self): f = self.db.unescape_bytea r = f(b'plain') self.assertIsInstance(r, bytes) self.assertEqual(r, b'plain') r = f(u'plain') self.assertIsInstance(r, bytes) self.assertEqual(r, b'plain') r = f(b"das is' k\\303\\244se") self.assertIsInstance(r, bytes) self.assertEqual(r, u"das is' käse".encode('utf8')) r = f(u"das is' k\\303\\244se") self.assertIsInstance(r, bytes) self.assertEqual(r, u"das is' käse".encode('utf8')) self.assertEqual(f(r'O\\000ps\\377!'), b'O\\000ps\\377!') self.assertEqual(f(r'\\x706c61696e'), b'\\x706c61696e') self.assertEqual(f(r'\\x746861742773206be47365'), b'\\x746861742773206be47365') self.assertEqual(f(r'\\x4f007073ff21'), b'\\x4f007073ff21') def testDecodeJson(self): f = self.db.decode_json self.assertIsNone(f('null')) data = { "id": 1, "name": "Foo", "price": 1234.5, "new": True, "note": None, "tags": ["Bar", "Eek"], "stock": {"warehouse": 300, "retail": 20}} text = json.dumps(data) r = f(text) self.assertIsInstance(r, dict) self.assertEqual(r, data) self.assertIsInstance(r['id'], int) self.assertIsInstance(r['name'], unicode) self.assertIsInstance(r['price'], float) self.assertIsInstance(r['new'], bool) self.assertIsInstance(r['tags'], list) self.assertIsInstance(r['stock'], dict) def testEncodeJson(self): f = self.db.encode_json self.assertEqual(f(None), 'null') data = { "id": 1, "name": "Foo", "price": 1234.5, "new": True, "note": None, "tags": ["Bar", "Eek"], "stock": {"warehouse": 300, "retail": 20}} text = json.dumps(data) r = f(data) self.assertIsInstance(r, str) self.assertEqual(r, text) def testGetParameter(self): f = self.db.get_parameter self.assertRaises(TypeError, f) self.assertRaises(TypeError, f, None) self.assertRaises(TypeError, f, 42) self.assertRaises(TypeError, f, '') self.assertRaises(TypeError, f, []) self.assertRaises(TypeError, f, ['']) self.assertRaises(pg.ProgrammingError, f, 'this_does_not_exist') r = f('standard_conforming_strings') self.assertEqual(r, 'on') r = f('lc_monetary') self.assertEqual(r, 'C') r = f('datestyle') self.assertEqual(r, 'ISO, YMD') r = f('bytea_output') self.assertEqual(r, 'hex') r = f(['bytea_output', 'lc_monetary']) self.assertIsInstance(r, list) self.assertEqual(r, ['hex', 'C']) r = f(('standard_conforming_strings', 'datestyle', 'bytea_output')) self.assertEqual(r, ['on', 'ISO, YMD', 'hex']) r = f(set(['bytea_output', 'lc_monetary'])) self.assertIsInstance(r, dict) self.assertEqual(r, {'bytea_output': 'hex', 'lc_monetary': 'C'}) r = f(set(['Bytea_Output', ' LC_Monetary '])) self.assertIsInstance(r, dict) self.assertEqual(r, {'Bytea_Output': 'hex', ' LC_Monetary ': 'C'}) s = dict.fromkeys(('bytea_output', 'lc_monetary')) r = f(s) self.assertIs(r, s) self.assertEqual(r, {'bytea_output': 'hex', 'lc_monetary': 'C'}) s = dict.fromkeys(('Bytea_Output', ' LC_Monetary ')) r = f(s) self.assertIs(r, s) self.assertEqual(r, {'Bytea_Output': 'hex', ' LC_Monetary ': 'C'}) def testGetParameterServerVersion(self): r = self.db.get_parameter('server_version_num') self.assertIsInstance(r, str) s = 
self.db.server_version self.assertIsInstance(s, int) self.assertEqual(r, str(s)) def testGetParameterAll(self): f = self.db.get_parameter r = f('all') self.assertIsInstance(r, dict) self.assertEqual(r['standard_conforming_strings'], 'on') self.assertEqual(r['lc_monetary'], 'C') self.assertEqual(r['DateStyle'], 'ISO, YMD') self.assertEqual(r['bytea_output'], 'hex') def testSetParameter(self): f = self.db.set_parameter g = self.db.get_parameter self.assertRaises(TypeError, f) self.assertRaises(TypeError, f, None) self.assertRaises(TypeError, f, 42) self.assertRaises(TypeError, f, '') self.assertRaises(TypeError, f, []) self.assertRaises(TypeError, f, ['']) self.assertRaises(ValueError, f, 'all', 'invalid') self.assertRaises(ValueError, f, { 'invalid1': 'value1', 'invalid2': 'value2'}, 'value') self.assertRaises(pg.ProgrammingError, f, 'this_does_not_exist') f('standard_conforming_strings', 'off') self.assertEqual(g('standard_conforming_strings'), 'off') f('datestyle', 'ISO, DMY') self.assertEqual(g('datestyle'), 'ISO, DMY') f(['standard_conforming_strings', 'datestyle'], ['on', 'ISO, DMY']) self.assertEqual(g('standard_conforming_strings'), 'on') self.assertEqual(g('datestyle'), 'ISO, DMY') f(['escape_string_warning', 'standard_conforming_strings'], 'off') self.assertEqual(g('escape_string_warning'), 'off') self.assertEqual(g('standard_conforming_strings'), 'off') f(('standard_conforming_strings', 'datestyle'), ('on', 'ISO, YMD')) self.assertEqual(g('standard_conforming_strings'), 'on') self.assertEqual(g('datestyle'), 'ISO, YMD') f(('escape_string_warning', 'standard_conforming_strings'), 'off') self.assertEqual(g('escape_string_warning'), 'off') self.assertEqual(g('standard_conforming_strings'), 'off') f(set(['escape_string_warning', 'standard_conforming_strings']), 'on') self.assertEqual(g('escape_string_warning'), 'on') self.assertEqual(g('standard_conforming_strings'), 'on') self.assertRaises(ValueError, f, set(['escape_string_warning', 'standard_conforming_strings']), ['off', 'on']) f(set(['escape_string_warning', 'standard_conforming_strings']), ['off', 'off']) self.assertEqual(g('escape_string_warning'), 'off') self.assertEqual(g('standard_conforming_strings'), 'off') f({'standard_conforming_strings': 'on', 'datestyle': 'ISO, YMD'}) self.assertEqual(g('standard_conforming_strings'), 'on') self.assertEqual(g('datestyle'), 'ISO, YMD') def testResetParameter(self): db = DB() f = db.set_parameter g = db.get_parameter r = g('escape_string_warning') self.assertIn(r, ('on', 'off')) esw, not_esw = r, 'off' if r == 'on' else 'on' r = g('standard_conforming_strings') self.assertIn(r, ('on', 'off')) scs, not_scs = r, 'off' if r == 'on' else 'on' f('escape_string_warning', not_esw) f('standard_conforming_strings', not_scs) self.assertEqual(g('escape_string_warning'), not_esw) self.assertEqual(g('standard_conforming_strings'), not_scs) f('escape_string_warning') f('standard_conforming_strings', None) self.assertEqual(g('escape_string_warning'), esw) self.assertEqual(g('standard_conforming_strings'), scs) f('escape_string_warning', not_esw) f('standard_conforming_strings', not_scs) self.assertEqual(g('escape_string_warning'), not_esw) self.assertEqual(g('standard_conforming_strings'), not_scs) f(['escape_string_warning', 'standard_conforming_strings'], None) self.assertEqual(g('escape_string_warning'), esw) self.assertEqual(g('standard_conforming_strings'), scs) f('escape_string_warning', not_esw) f('standard_conforming_strings', not_scs) self.assertEqual(g('escape_string_warning'), not_esw) 
        self.assertEqual(g('standard_conforming_strings'), not_scs)
        f(('escape_string_warning', 'standard_conforming_strings'))
        self.assertEqual(g('escape_string_warning'), esw)
        self.assertEqual(g('standard_conforming_strings'), scs)
        f('escape_string_warning', not_esw)
        f('standard_conforming_strings', not_scs)
        self.assertEqual(g('escape_string_warning'), not_esw)
        self.assertEqual(g('standard_conforming_strings'), not_scs)
        f(set(['escape_string_warning', 'standard_conforming_strings']))
        self.assertEqual(g('escape_string_warning'), esw)
        self.assertEqual(g('standard_conforming_strings'), scs)
        db.close()

    def testResetParameterAll(self):
        db = DB()
        f = db.set_parameter
        self.assertRaises(ValueError, f, 'all', 0)
        self.assertRaises(ValueError, f, 'all', 'off')
        g = db.get_parameter
        r = g('escape_string_warning')
        self.assertIn(r, ('on', 'off'))
        dwi, not_dwi = r, 'off' if r == 'on' else 'on'
        r = g('standard_conforming_strings')
        self.assertIn(r, ('on', 'off'))
        scs, not_scs = r, 'off' if r == 'on' else 'on'
        f('escape_string_warning', not_dwi)
        f('standard_conforming_strings', not_scs)
        self.assertEqual(g('escape_string_warning'), not_dwi)
        self.assertEqual(g('standard_conforming_strings'), not_scs)
        f('all')
        self.assertEqual(g('escape_string_warning'), dwi)
        self.assertEqual(g('standard_conforming_strings'), scs)
        db.close()

    def testSetParameterLocal(self):
        f = self.db.set_parameter
        g = self.db.get_parameter
        self.assertEqual(g('standard_conforming_strings'), 'on')
        self.db.begin()
        f('standard_conforming_strings', 'off', local=True)
        self.assertEqual(g('standard_conforming_strings'), 'off')
        self.db.end()
        self.assertEqual(g('standard_conforming_strings'), 'on')

    def testSetParameterSession(self):
        f = self.db.set_parameter
        g = self.db.get_parameter
        self.assertEqual(g('standard_conforming_strings'), 'on')
        self.db.begin()
        f('standard_conforming_strings', 'off', local=False)
        self.assertEqual(g('standard_conforming_strings'), 'off')
        self.db.end()
        self.assertEqual(g('standard_conforming_strings'), 'off')

    def testReset(self):
        db = DB()
        default_datestyle = db.get_parameter('datestyle')
        changed_datestyle = 'ISO, DMY'
        if changed_datestyle == default_datestyle:
            changed_datestyle = 'ISO, YMD'
        self.db.set_parameter('datestyle', changed_datestyle)
        r = self.db.get_parameter('datestyle')
        self.assertEqual(r, changed_datestyle)
        con = self.db.db
        q = con.query("show datestyle")
        self.db.reset()
        r = q.getresult()[0][0]
        self.assertEqual(r, changed_datestyle)
        q = con.query("show datestyle")
        r = q.getresult()[0][0]
        self.assertEqual(r, default_datestyle)
        r = self.db.get_parameter('datestyle')
        self.assertEqual(r, default_datestyle)
        db.close()

    def testReopen(self):
        db = DB()
        default_datestyle = db.get_parameter('datestyle')
        changed_datestyle = 'ISO, DMY'
        if changed_datestyle == default_datestyle:
            changed_datestyle = 'ISO, YMD'
        self.db.set_parameter('datestyle', changed_datestyle)
        r = self.db.get_parameter('datestyle')
        self.assertEqual(r, changed_datestyle)
        con = self.db.db
        q = con.query("show datestyle")
        self.db.reopen()
        r = q.getresult()[0][0]
        self.assertEqual(r, changed_datestyle)
        self.assertRaises(TypeError, getattr, con, 'query')
        r = self.db.get_parameter('datestyle')
        self.assertEqual(r, default_datestyle)
        db.close()

    def testCreateTable(self):
        table = 'test hello world'
        values = [(2, "World!"), (1, "Hello")]
        self.createTable(table, "n smallint, t varchar",
                         temporary=True, oids=False, values=values)
        r = self.db.query(
            'select t from "%s" order by n' % table).getresult()
        r = ', '.join(row[0] for row in r)
        self.assertEqual(r, "Hello, World!")

    def
testCreateTableWithOids(self): if not self.oids: self.skipTest("database does not support tables with oids") table = 'test hello world' values = [(2, "World!"), (1, "Hello")] self.createTable(table, "n smallint, t varchar", temporary=True, oids=True, values=values) r = self.db.query('select t from "%s" order by n' % table).getresult() r = ', '.join(row[0] for row in r) self.assertEqual(r, "Hello, World!") r = self.db.query('select oid from "%s" limit 1' % table).getresult() self.assertIsInstance(r[0][0], int) def testQuery(self): query = self.db.query table = 'test_table' self.createTable(table, "n integer", oids=False) q = "insert into test_table values (1)" r = query(q) self.assertIsInstance(r, str) self.assertEqual(r, '1') q = "insert into test_table select 2" r = query(q) self.assertIsInstance(r, str) self.assertEqual(r, '1') q = "select n from test_table where n>1" r = query(q).getresult() self.assertEqual(len(r), 1) r = r[0] self.assertEqual(len(r), 1) r = r[0] self.assertIsInstance(r, int) self.assertEqual(r, 2) q = "insert into test_table select 3 union select 4 union select 5" r = query(q) self.assertIsInstance(r, str) self.assertEqual(r, '3') q = "update test_table set n=4 where n<5" r = query(q) self.assertIsInstance(r, str) self.assertEqual(r, '4') q = "delete from test_table" r = query(q) self.assertIsInstance(r, str) self.assertEqual(r, '5') def testQueryWithOids(self): if not self.oids: self.skipTest("database does not support tables with oids") query = self.db.query table = 'test_table' self.createTable(table, "n integer", oids=True) q = "insert into test_table values (1)" r = query(q) self.assertIsInstance(r, int) q = "insert into test_table select 2" r = query(q) self.assertIsInstance(r, int) oid = r q = "select oid from test_table where n=2" r = query(q).getresult() self.assertEqual(len(r), 1) r = r[0] self.assertEqual(len(r), 1) r = r[0] self.assertEqual(r, oid) q = "insert into test_table select 3 union select 4 union select 5" r = query(q) self.assertIsInstance(r, str) self.assertEqual(r, '3') q = "update test_table set n=4 where n<5" r = query(q) self.assertIsInstance(r, str) self.assertEqual(r, '4') q = "delete from test_table" r = query(q) self.assertIsInstance(r, str) self.assertEqual(r, '5') def testMultipleQueries(self): self.assertEqual(self.db.query( "create temporary table test_multi (n integer);" "insert into test_multi values (4711);" "select n from test_multi").getresult()[0][0], 4711) def testQueryWithParams(self): query = self.db.query self.createTable('test_table', 'n1 integer, n2 integer', oids=False) q = "insert into test_table values ($1, $2)" r = query(q, (1, 2)) self.assertEqual(r, '1') r = query(q, [3, 4]) self.assertEqual(r, '1') r = query(q, [5, 6]) self.assertEqual(r, '1') q = "select * from test_table order by 1, 2" self.assertEqual(query(q).getresult(), [(1, 2), (3, 4), (5, 6)]) q = "select * from test_table where n1=$1 and n2=$2" self.assertEqual(query(q, 3, 4).getresult(), [(3, 4)]) q = "update test_table set n2=$2 where n1=$1" r = query(q, 3, 7) self.assertEqual(r, '1') q = "select * from test_table order by 1, 2" self.assertEqual(query(q).getresult(), [(1, 2), (3, 7), (5, 6)]) q = "delete from test_table where n2!=$1" r = query(q, 4) self.assertEqual(r, '3') def testEmptyQuery(self): self.assertRaises(ValueError, self.db.query, '') def testQueryDataError(self): try: self.db.query("select 1/0") except pg.DataError as error: self.assertEqual(error.sqlstate, '22012') def testQueryFormatted(self): f = self.db.query_formatted t = True if 
pg.get_bool() else 't' # test with tuple q = f("select %s::int, %s::real, %s::text, %s::bool", (3, 2.5, 'hello', True)) r = q.getresult()[0] self.assertEqual(r, (3, 2.5, 'hello', t)) # test with tuple, inline q = f("select %s, %s, %s, %s", (3, 2.5, 'hello', True), inline=True) r = q.getresult()[0] if isinstance(r[1], Decimal): # Python 2.6 cannot compare float and Decimal r = list(r) r[1] = float(r[1]) r = tuple(r) self.assertEqual(r, (3, 2.5, 'hello', t)) # test with dict q = f("select %(a)s::int, %(b)s::real, %(c)s::text, %(d)s::bool", dict(a=3, b=2.5, c='hello', d=True)) r = q.getresult()[0] self.assertEqual(r, (3, 2.5, 'hello', t)) # test with dict, inline q = f("select %(a)s, %(b)s, %(c)s, %(d)s", dict(a=3, b=2.5, c='hello', d=True), inline=True) r = q.getresult()[0] if isinstance(r[1], Decimal): # Python 2.6 cannot compare float and Decimal r = list(r) r[1] = float(r[1]) r = tuple(r) self.assertEqual(r, (3, 2.5, 'hello', t)) # test with dict and extra values q = f("select %(a)s||%(b)s||%(c)s||%(d)s||'epsilon'", dict(a='alpha', b='beta', c='gamma', d='delta', e='extra')) r = q.getresult()[0][0] self.assertEqual(r, 'alphabetagammadeltaepsilon') def testQueryFormattedWithAny(self): f = self.db.query_formatted q = "select 2 = any(%s)" r = f(q, [[1, 3]]).getresult()[0][0] self.assertEqual(r, False if pg.get_bool() else 'f') r = f(q, [[1, 2, 3]]).getresult()[0][0] self.assertEqual(r, True if pg.get_bool() else 't') r = f(q, [[]]).getresult()[0][0] self.assertEqual(r, False if pg.get_bool() else 'f') r = f(q, [[None]]).getresult()[0][0] self.assertIsNone(r) def testQueryFormattedWithoutParams(self): f = self.db.query_formatted q = "select 42" r = f(q).getresult()[0][0] self.assertEqual(r, 42) r = f(q, None).getresult()[0][0] self.assertEqual(r, 42) r = f(q, []).getresult()[0][0] self.assertEqual(r, 42) r = f(q, {}).getresult()[0][0] self.assertEqual(r, 42) def testPrepare(self): p = self.db.prepare self.assertIsNone(p('my query', "select 'hello'")) self.assertIsNone(p('my other query', "select 'world'")) self.assertRaises(pg.ProgrammingError, p, 'my query', "select 'hello, too'") def testPrepareUnnamed(self): p = self.db.prepare self.assertIsNone(p('', "select null")) self.assertIsNone(p(None, "select null")) def testQueryPreparedWithoutParams(self): f = self.db.query_prepared self.assertRaises(pg.OperationalError, f, 'q') p = self.db.prepare p('q1', "select 17") p('q2', "select 42") r = f('q1').getresult()[0][0] self.assertEqual(r, 17) r = f('q2').getresult()[0][0] self.assertEqual(r, 42) def testQueryPreparedWithParams(self): p = self.db.prepare p('sum', "select 1 + $1 + $2 + $3") p('cat', "select initcap($1) || ', ' || $2 || '!'") f = self.db.query_prepared r = f('sum', 2, 3, 5).getresult()[0][0] self.assertEqual(r, 11) r = f('cat', 'hello', 'world').getresult()[0][0] self.assertEqual(r, 'Hello, world!') def testQueryPreparedUnnamedWithOutParams(self): f = self.db.query_prepared self.assertRaises(pg.OperationalError, f, None) self.assertRaises(pg.OperationalError, f, '') p = self.db.prepare # make sure all types are known so that we will not # generate other anonymous queries in the background p('', "select 'empty'::varchar") r = f(None).getresult()[0][0] self.assertEqual(r, 'empty') r = f('').getresult()[0][0] self.assertEqual(r, 'empty') p(None, "select 'none'::varchar") r = f(None).getresult()[0][0] self.assertEqual(r, 'none') r = f('').getresult()[0][0] self.assertEqual(r, 'none') def testQueryPreparedUnnamedWithParams(self): p = self.db.prepare p('', "select 1 + $1 + $2") f = 
self.db.query_prepared r = f('', 2, 3).getresult()[0][0] self.assertEqual(r, 6) r = f(None, 2, 3).getresult()[0][0] self.assertEqual(r, 6) p(None, "select 2 + $1 + $2") f = self.db.query_prepared r = f('', 3, 4).getresult()[0][0] self.assertEqual(r, 9) r = f(None, 3, 4).getresult()[0][0] self.assertEqual(r, 9) def testDescribePrepared(self): self.db.prepare('count', "select 1 as first, 2 as second") f = self.db.describe_prepared r = f('count').listfields() self.assertEqual(r, ('first', 'second')) def testDescribePreparedUnnamed(self): self.db.prepare('', "select null as anon") f = self.db.describe_prepared r = f().listfields() self.assertEqual(r, ('anon',)) r = f(None).listfields() self.assertEqual(r, ('anon',)) r = f('').listfields() self.assertEqual(r, ('anon',)) def testDeletePrepared(self): f = self.db.delete_prepared f() e = pg.OperationalError self.assertRaises(e, f, 'myquery') p = self.db.prepare p('q1', "select 1") p('q2', "select 2") f('q1') f('q2') self.assertRaises(e, f, 'q1') self.assertRaises(e, f, 'q2') p('q1', "select 1") p('q2', "select 2") f() self.assertRaises(e, f, 'q1') self.assertRaises(e, f, 'q2') def testPkey(self): query = self.db.query pkey = self.db.pkey self.assertRaises(KeyError, pkey, 'test') for t in ('pkeytest', 'primary key test'): self.createTable('%s0' % t, 'a smallint') self.createTable('%s1' % t, 'b smallint primary key') self.createTable('%s2' % t, 'c smallint, d smallint primary key') self.createTable('%s3' % t, 'e smallint, f smallint, g smallint, h smallint, i smallint,' ' primary key (f, h)') self.createTable('%s4' % t, 'e smallint, f smallint, g smallint, h smallint, i smallint,' ' primary key (h, f)') self.createTable('%s5' % t, 'more_than_one_letter varchar primary key') self.createTable('%s6' % t, '"with space" date primary key') self.createTable('%s7' % t, 'a_very_long_column_name varchar, "with space" date, "42" int,' ' primary key (a_very_long_column_name, "with space", "42")') self.assertRaises(KeyError, pkey, '%s0' % t) self.assertEqual(pkey('%s1' % t), 'b') self.assertEqual(pkey('%s1' % t, True), ('b',)) self.assertEqual(pkey('%s1' % t, composite=False), 'b') self.assertEqual(pkey('%s1' % t, composite=True), ('b',)) self.assertEqual(pkey('%s2' % t), 'd') self.assertEqual(pkey('%s2' % t, composite=True), ('d',)) r = pkey('%s3' % t) self.assertIsInstance(r, tuple) self.assertEqual(r, ('f', 'h')) r = pkey('%s3' % t, composite=False) self.assertIsInstance(r, tuple) self.assertEqual(r, ('f', 'h')) r = pkey('%s4' % t) self.assertIsInstance(r, tuple) self.assertEqual(r, ('h', 'f')) self.assertEqual(pkey('%s5' % t), 'more_than_one_letter') self.assertEqual(pkey('%s6' % t), 'with space') r = pkey('%s7' % t) self.assertIsInstance(r, tuple) self.assertEqual(r, ( 'a_very_long_column_name', 'with space', '42')) # a newly added primary key will be detected query('alter table "%s0" add primary key (a)' % t) self.assertEqual(pkey('%s0' % t), 'a') # a changed primary key will not be detected, # indicating that the internal cache is operating query('alter table "%s1" rename column b to x' % t) self.assertEqual(pkey('%s1' % t), 'b') # we get the changed primary key when the cache is flushed self.assertEqual(pkey('%s1' % t, flush=True), 'x') def testGetDatabases(self): databases = self.db.get_databases() self.assertIn('template0', databases) self.assertIn('template1', databases) self.assertNotIn('not existing database', databases) self.assertIn('postgres', databases) self.assertIn(dbname, databases) def testGetTables(self): get_tables = self.db.get_tables 
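        # Note for the relation tests below: the letters passed to
        # get_relations() are pg_class relkind codes, 'r' for ordinary
        # tables and 'v' for views, so get_relations('rv') lists both
        # while get_relations('r') returns tables only.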
tables = ('A very Special Name', 'A_MiXeD_quoted_NaMe', 'Hello, Test World!', 'Zoro', 'a1', 'a2', 'a321', 'averyveryveryveryveryveryveryreallyreallylongtablename', 'b0', 'b3', 'x', 'xXx', 'xx', 'y', 'z') for t in tables: self.db.query('drop table if exists "%s" cascade' % t) before_tables = get_tables() self.assertIsInstance(before_tables, list) for t in before_tables: t = t.split('.', 1) self.assertGreaterEqual(len(t), 2) if len(t) > 2: self.assertTrue(t[1].startswith('"')) t = t[0] self.assertNotEqual(t, 'information_schema') self.assertFalse(t.startswith('pg_')) for t in tables: self.createTable(t, 'as select 0', temporary=False) current_tables = get_tables() new_tables = [t for t in current_tables if t not in before_tables] expected_new_tables = ['public.%s' % ( '"%s"' % t if ' ' in t or t != t.lower() else t) for t in tables] self.assertEqual(new_tables, expected_new_tables) self.doCleanups() after_tables = get_tables() self.assertEqual(after_tables, before_tables) def testGetSystemTables(self): get_tables = self.db.get_tables result = get_tables() self.assertNotIn('pg_catalog.pg_class', result) self.assertNotIn('information_schema.tables', result) result = get_tables(system=False) self.assertNotIn('pg_catalog.pg_class', result) self.assertNotIn('information_schema.tables', result) result = get_tables(system=True) self.assertIn('pg_catalog.pg_class', result) self.assertNotIn('information_schema.tables', result) def testGetRelations(self): get_relations = self.db.get_relations result = get_relations() self.assertIn('public.test', result) self.assertIn('public.test_view', result) result = get_relations('rv') self.assertIn('public.test', result) self.assertIn('public.test_view', result) result = get_relations('r') self.assertIn('public.test', result) self.assertNotIn('public.test_view', result) result = get_relations('v') self.assertNotIn('public.test', result) self.assertIn('public.test_view', result) result = get_relations('cisSt') self.assertNotIn('public.test', result) self.assertNotIn('public.test_view', result) def testGetSystemRelations(self): get_relations = self.db.get_relations result = get_relations() self.assertNotIn('pg_catalog.pg_class', result) self.assertNotIn('information_schema.tables', result) result = get_relations(system=False) self.assertNotIn('pg_catalog.pg_class', result) self.assertNotIn('information_schema.tables', result) result = get_relations(system=True) self.assertIn('pg_catalog.pg_class', result) self.assertIn('information_schema.tables', result) def testGetAttnames(self): get_attnames = self.db.get_attnames self.assertRaises(pg.ProgrammingError, self.db.get_attnames, 'does_not_exist') self.assertRaises(pg.ProgrammingError, self.db.get_attnames, 'has.too.many.dots') r = get_attnames('test') self.assertIsInstance(r, dict) if self.regtypes: self.assertEqual(r, dict( i2='smallint', i4='integer', i8='bigint', d='numeric', f4='real', f8='double precision', m='money', v4='character varying', c4='character', t='text')) else: self.assertEqual(r, dict( i2='int', i4='int', i8='int', d='num', f4='float', f8='float', m='money', v4='text', c4='text', t='text')) self.createTable('test_table', 'n int, alpha smallint, beta bool,' ' gamma char(5), tau text, v varchar(3)') r = get_attnames('test_table') self.assertIsInstance(r, dict) if self.regtypes: self.assertEqual(r, dict( n='integer', alpha='smallint', beta='boolean', gamma='character', tau='text', v='character varying')) else: self.assertEqual(r, dict( n='int', alpha='int', beta='bool', gamma='text', tau='text', 
v='text')) def testGetAttnamesWithQuotes(self): get_attnames = self.db.get_attnames table = 'test table for get_attnames()' self.createTable(table, '"Prime!" smallint, "much space" integer, "Questions?" text') r = get_attnames(table) self.assertIsInstance(r, dict) if self.regtypes: self.assertEqual(r, { 'Prime!': 'smallint', 'much space': 'integer', 'Questions?': 'text'}) else: self.assertEqual(r, { 'Prime!': 'int', 'much space': 'int', 'Questions?': 'text'}) table = 'yet another test table for get_attnames()' self.createTable(table, 'a smallint, b integer, c bigint,' ' e numeric, f real, f2 double precision, m money,' ' x smallint, y smallint, z smallint,' ' Normal_NaMe smallint, "Special Name" smallint,' ' t text, u char(2), v varchar(2),' ' primary key (y, u)') r = get_attnames(table) self.assertIsInstance(r, dict) if self.regtypes: self.assertEqual(r, { 'a': 'smallint', 'b': 'integer', 'c': 'bigint', 'e': 'numeric', 'f': 'real', 'f2': 'double precision', 'm': 'money', 'normal_name': 'smallint', 'Special Name': 'smallint', 'u': 'character', 't': 'text', 'v': 'character varying', 'y': 'smallint', 'x': 'smallint', 'z': 'smallint'}) else: self.assertEqual(r, {'a': 'int', 'b': 'int', 'c': 'int', 'e': 'num', 'f': 'float', 'f2': 'float', 'm': 'money', 'normal_name': 'int', 'Special Name': 'int', 'u': 'text', 't': 'text', 'v': 'text', 'y': 'int', 'x': 'int', 'z': 'int'}) def testGetAttnamesWithRegtypes(self): get_attnames = self.db.get_attnames self.createTable('test_table', 'n int, alpha smallint, beta bool,' ' gamma char(5), tau text, v varchar(3)') use_regtypes = self.db.use_regtypes regtypes = use_regtypes() self.assertEqual(regtypes, self.regtypes) use_regtypes(True) try: r = get_attnames("test_table") self.assertIsInstance(r, dict) finally: use_regtypes(regtypes) self.assertEqual(r, dict( n='integer', alpha='smallint', beta='boolean', gamma='character', tau='text', v='character varying')) def testGetAttnamesWithoutRegtypes(self): get_attnames = self.db.get_attnames self.createTable('test_table', 'n int, alpha smallint, beta bool,' ' gamma char(5), tau text, v varchar(3)') use_regtypes = self.db.use_regtypes regtypes = use_regtypes() self.assertEqual(regtypes, self.regtypes) use_regtypes(False) try: r = get_attnames("test_table") self.assertIsInstance(r, dict) finally: use_regtypes(regtypes) self.assertEqual(r, dict( n='int', alpha='int', beta='bool', gamma='text', tau='text', v='text')) def testGetAttnamesIsCached(self): get_attnames = self.db.get_attnames int_type = 'integer' if self.regtypes else 'int' text_type = 'text' query = self.db.query self.createTable('test_table', 'col int') r = get_attnames("test_table") self.assertIsInstance(r, dict) self.assertEqual(r, dict(col=int_type)) query("alter table test_table alter column col type text") query("alter table test_table add column col2 int") r = get_attnames("test_table") self.assertEqual(r, dict(col=int_type)) r = get_attnames("test_table", flush=True) self.assertEqual(r, dict(col=text_type, col2=int_type)) query("alter table test_table drop column col2") r = get_attnames("test_table") self.assertEqual(r, dict(col=text_type, col2=int_type)) r = get_attnames("test_table", flush=True) self.assertEqual(r, dict(col=text_type)) query("alter table test_table drop column col") r = get_attnames("test_table") self.assertEqual(r, dict(col=text_type)) r = get_attnames("test_table", flush=True) self.assertEqual(r, dict()) def testGetAttnamesIsOrdered(self): get_attnames = self.db.get_attnames r = get_attnames('test', flush=True) 
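        # Note: as testGetAttnamesIsCached above demonstrates,
        # get_attnames() answers from an internal cache, which can go
        # stale after DDL changes; flush=True (used here) discards the
        # cached entry and re-reads the column definitions from the
        # system catalogs.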
self.assertIsInstance(r, OrderedDict) if self.regtypes: self.assertEqual(r, OrderedDict([ ('i2', 'smallint'), ('i4', 'integer'), ('i8', 'bigint'), ('d', 'numeric'), ('f4', 'real'), ('f8', 'double precision'), ('m', 'money'), ('v4', 'character varying'), ('c4', 'character'), ('t', 'text')])) else: self.assertEqual(r, OrderedDict([ ('i2', 'int'), ('i4', 'int'), ('i8', 'int'), ('d', 'num'), ('f4', 'float'), ('f8', 'float'), ('m', 'money'), ('v4', 'text'), ('c4', 'text'), ('t', 'text')])) if OrderedDict is not dict: r = ' '.join(list(r.keys())) self.assertEqual(r, 'i2 i4 i8 d f4 f8 m v4 c4 t') table = 'test table for get_attnames' self.createTable(table, 'n int, alpha smallint, v varchar(3),' ' gamma char(5), tau text, beta bool') r = get_attnames(table) self.assertIsInstance(r, OrderedDict) if self.regtypes: self.assertEqual(r, OrderedDict([ ('n', 'integer'), ('alpha', 'smallint'), ('v', 'character varying'), ('gamma', 'character'), ('tau', 'text'), ('beta', 'boolean')])) else: self.assertEqual(r, OrderedDict([ ('n', 'int'), ('alpha', 'int'), ('v', 'text'), ('gamma', 'text'), ('tau', 'text'), ('beta', 'bool')])) if OrderedDict is not dict: r = ' '.join(list(r.keys())) self.assertEqual(r, 'n alpha v gamma tau beta') else: self.skipTest('OrderedDict is not supported') def testGetAttnamesIsAttrDict(self): AttrDict = pg.AttrDict get_attnames = self.db.get_attnames r = get_attnames('test', flush=True) self.assertIsInstance(r, AttrDict) if self.regtypes: self.assertEqual(r, AttrDict([ ('i2', 'smallint'), ('i4', 'integer'), ('i8', 'bigint'), ('d', 'numeric'), ('f4', 'real'), ('f8', 'double precision'), ('m', 'money'), ('v4', 'character varying'), ('c4', 'character'), ('t', 'text')])) else: self.assertEqual(r, AttrDict([ ('i2', 'int'), ('i4', 'int'), ('i8', 'int'), ('d', 'num'), ('f4', 'float'), ('f8', 'float'), ('m', 'money'), ('v4', 'text'), ('c4', 'text'), ('t', 'text')])) r = ' '.join(list(r.keys())) self.assertEqual(r, 'i2 i4 i8 d f4 f8 m v4 c4 t') table = 'test table for get_attnames' self.createTable(table, 'n int, alpha smallint, v varchar(3),' ' gamma char(5), tau text, beta bool') r = get_attnames(table) self.assertIsInstance(r, AttrDict) if self.regtypes: self.assertEqual(r, AttrDict([ ('n', 'integer'), ('alpha', 'smallint'), ('v', 'character varying'), ('gamma', 'character'), ('tau', 'text'), ('beta', 'boolean')])) else: self.assertEqual(r, AttrDict([ ('n', 'int'), ('alpha', 'int'), ('v', 'text'), ('gamma', 'text'), ('tau', 'text'), ('beta', 'bool')])) r = ' '.join(list(r.keys())) self.assertEqual(r, 'n alpha v gamma tau beta') def testHasTablePrivilege(self): can = self.db.has_table_privilege self.assertEqual(can('test'), True) self.assertEqual(can('test', 'select'), True) self.assertEqual(can('test', 'SeLeCt'), True) self.assertEqual(can('test', 'SELECT'), True) self.assertEqual(can('test', 'insert'), True) self.assertEqual(can('test', 'update'), True) self.assertEqual(can('test', 'delete'), True) self.assertRaises(pg.DataError, can, 'test', 'foobar') self.assertRaises(pg.ProgrammingError, can, 'table_does_not_exist') r = self.db.query('select rolsuper FROM pg_roles' ' where rolname=current_user').getresult()[0][0] if not pg.get_bool(): r = r == 't' if r: self.skipTest('must not be superuser') self.assertEqual(can('pg_views', 'select'), True) self.assertEqual(can('pg_views', 'delete'), False) def testGet(self): get = self.db.get query = self.db.query table = 'get_test_table' self.assertRaises(TypeError, get) self.assertRaises(TypeError, get, table) self.createTable(table, 'n integer, t 
text', values=enumerate('xyz', start=1)) self.assertRaises(pg.ProgrammingError, get, table, 2) r = get(table, 2, 'n') self.assertIsInstance(r, dict) self.assertEqual(r, dict(n=2, t='y')) r = get(table, 1, 'n') self.assertEqual(r, dict(n=1, t='x')) r = get(table, (3,), ('n',)) self.assertEqual(r, dict(n=3, t='z')) r = get(table, 'y', 't') self.assertEqual(r, dict(n=2, t='y')) self.assertRaises(pg.DatabaseError, get, table, 4) self.assertRaises(pg.DatabaseError, get, table, 4, 'n') self.assertRaises(pg.DatabaseError, get, table, 'y') self.assertRaises(pg.DatabaseError, get, table, 2, 't') s = dict(n=3) self.assertRaises(pg.ProgrammingError, get, table, s) r = get(table, s, 'n') self.assertIs(r, s) self.assertEqual(r, dict(n=3, t='z')) s.update(t='x') r = get(table, s, 't') self.assertIs(r, s) self.assertEqual(s, dict(n=1, t='x')) r = get(table, s, ('n', 't')) self.assertIs(r, s) self.assertEqual(r, dict(n=1, t='x')) query('alter table "%s" alter n set not null' % table) query('alter table "%s" add primary key (n)' % table) r = get(table, 2) self.assertIsInstance(r, dict) self.assertEqual(r, dict(n=2, t='y')) self.assertEqual(get(table, 1)['t'], 'x') self.assertEqual(get(table, 3)['t'], 'z') self.assertEqual(get(table + '*', 2)['t'], 'y') self.assertEqual(get(table + ' *', 2)['t'], 'y') self.assertRaises(KeyError, get, table, (2, 2)) s = dict(n=3) r = get(table, s) self.assertIs(r, s) self.assertEqual(r, dict(n=3, t='z')) s.update(n=1) self.assertEqual(get(table, s)['t'], 'x') s.update(n=2) self.assertEqual(get(table, r)['t'], 'y') s.pop('n') self.assertRaises(KeyError, get, table, s) def testGetWithOids(self): if not self.oids: self.skipTest("database does not support tables with oids") get = self.db.get query = self.db.query table = 'get_with_oid_test_table' self.createTable(table, 'n integer, t text', oids=True, values=enumerate('xyz', start=1)) self.assertRaises(pg.ProgrammingError, get, table, 2) self.assertRaises(KeyError, get, table, {}, 'oid') r = get(table, 2, 'n') qoid = 'oid(%s)' % table self.assertIn(qoid, r) oid = r[qoid] self.assertIsInstance(oid, int) result = {'t': 'y', 'n': 2, qoid: oid} self.assertEqual(r, result) r = get(table, oid, 'oid') self.assertEqual(r, result) r = get(table, dict(oid=oid)) self.assertEqual(r, result) r = get(table, dict(oid=oid), 'oid') self.assertEqual(r, result) r = get(table, {qoid: oid}) self.assertEqual(r, result) r = get(table, {qoid: oid}, 'oid') self.assertEqual(r, result) self.assertEqual(get(table + '*', 2, 'n'), r) self.assertEqual(get(table + ' *', 2, 'n'), r) self.assertEqual(get(table, oid, 'oid')['t'], 'y') self.assertEqual(get(table, 1, 'n')['t'], 'x') self.assertEqual(get(table, 3, 'n')['t'], 'z') self.assertEqual(get(table, 2, 'n')['t'], 'y') self.assertRaises(pg.DatabaseError, get, table, 4, 'n') r['n'] = 3 self.assertEqual(get(table, r, 'n')['t'], 'z') self.assertEqual(get(table, 1, 'n')['t'], 'x') self.assertEqual(get(table, r, 'oid')['t'], 'z') query('alter table "%s" alter n set not null' % table) query('alter table "%s" add primary key (n)' % table) self.assertEqual(get(table, 3)['t'], 'z') self.assertEqual(get(table, 1)['t'], 'x') self.assertEqual(get(table, 2)['t'], 'y') r['n'] = 1 self.assertEqual(get(table, r)['t'], 'x') r['n'] = 3 self.assertEqual(get(table, r)['t'], 'z') r['n'] = 2 self.assertEqual(get(table, r)['t'], 'y') r = get(table, oid, 'oid') self.assertEqual(r, result) r = get(table, dict(oid=oid)) self.assertEqual(r, result) r = get(table, dict(oid=oid), 'oid') self.assertEqual(r, result) r = get(table, {qoid: 
oid}) self.assertEqual(r, result) r = get(table, {qoid: oid}, 'oid') self.assertEqual(r, result) r = get(table, dict(oid=oid, n=1)) self.assertEqual(r['n'], 1) self.assertNotEqual(r[qoid], oid) r = get(table, dict(oid=oid, t='z'), 't') self.assertEqual(r['n'], 3) self.assertNotEqual(r[qoid], oid) def testGetWithCompositeKey(self): get = self.db.get query = self.db.query table = 'get_test_table_1' self.createTable(table, 'n integer primary key, t text', values=enumerate('abc', start=1)) self.assertEqual(get(table, 2)['t'], 'b') self.assertEqual(get(table, 1, 'n')['t'], 'a') self.assertEqual(get(table, 2, ('n',))['t'], 'b') self.assertEqual(get(table, 3, ['n'])['t'], 'c') self.assertEqual(get(table, (2,), ('n',))['t'], 'b') self.assertEqual(get(table, 'b', 't')['n'], 2) self.assertEqual(get(table, ('a',), ('t',))['n'], 1) self.assertEqual(get(table, ['c'], ['t'])['n'], 3) table = 'get_test_table_2' self.createTable(table, 'n integer, m integer, t text, primary key (n, m)', values=[(n + 1, m + 1, chr(ord('a') + 2 * n + m)) for n in range(3) for m in range(2)]) self.assertRaises(KeyError, get, table, 2) self.assertEqual(get(table, (1, 1))['t'], 'a') self.assertEqual(get(table, (1, 2))['t'], 'b') self.assertEqual(get(table, (2, 1))['t'], 'c') self.assertEqual(get(table, (1, 2), ('n', 'm'))['t'], 'b') self.assertEqual(get(table, (1, 2), ('m', 'n'))['t'], 'c') self.assertEqual(get(table, (3, 1), ('n', 'm'))['t'], 'e') self.assertEqual(get(table, (1, 3), ('m', 'n'))['t'], 'e') self.assertEqual(get(table, dict(n=2, m=2))['t'], 'd') self.assertEqual(get(table, dict(n=1, m=2), ('n', 'm'))['t'], 'b') self.assertEqual(get(table, dict(n=2, m=1), ['n', 'm'])['t'], 'c') self.assertEqual(get(table, dict(n=3, m=2), ('m', 'n'))['t'], 'f') def testGetWithQuotedNames(self): get = self.db.get query = self.db.query table = 'test table for get()' self.createTable(table, '"Prime!" smallint primary key,' ' "much space" integer, "Questions?" 
text', values=[(17, 1001, 'No!')]) r = get(table, 17) self.assertIsInstance(r, dict) self.assertEqual(r['Prime!'], 17) self.assertEqual(r['much space'], 1001) self.assertEqual(r['Questions?'], 'No!') def testGetFromView(self): self.db.query('delete from test where i4=14') self.db.query('insert into test (i4, v4) values(' "14, 'abc4')") r = self.db.get('test_view', 14, 'i4') self.assertIn('v4', r) self.assertEqual(r['v4'], 'abc4') def testGetLittleBobbyTables(self): get = self.db.get query = self.db.query self.createTable('test_students', 'firstname varchar primary key, nickname varchar, grade char(2)', values=[("D'Arcy", 'Darcey', 'A+'), ('Sheldon', 'Moonpie', 'A+'), ('Robert', 'Little Bobby Tables', 'D-')]) r = get('test_students', 'Sheldon') self.assertEqual(r, dict( firstname="Sheldon", nickname='Moonpie', grade='A+')) r = get('test_students', 'Robert') self.assertEqual(r, dict( firstname="Robert", nickname='Little Bobby Tables', grade='D-')) r = get('test_students', "D'Arcy") self.assertEqual(r, dict( firstname="D'Arcy", nickname='Darcey', grade='A+')) try: get('test_students', "D' Arcy") except pg.DatabaseError as error: self.assertEqual(str(error), 'No such record in test_students\nwhere "firstname" = $1\n' 'with $1="D\' Arcy"') try: get('test_students', "Robert'); TRUNCATE TABLE test_students;--") except pg.DatabaseError as error: self.assertEqual(str(error), 'No such record in test_students\nwhere "firstname" = $1\n' 'with $1="Robert\'); TRUNCATE TABLE test_students;--"') q = "select * from test_students order by 1 limit 4" r = query(q).getresult() self.assertEqual(len(r), 3) self.assertEqual(r[1][2], 'D-') def testInsert(self): insert = self.db.insert query = self.db.query bool_on = pg.get_bool() decimal = pg.get_decimal() table = 'insert_test_table' self.createTable(table, 'i2 smallint, i4 integer, i8 bigint,' ' d numeric, f4 real, f8 double precision, m money,' ' v4 varchar(4), c4 char(4), t text,' ' b boolean, ts timestamp') tests = [dict(i2=None, i4=None, i8=None), (dict(i2='', i4='', i8=''), dict(i2=None, i4=None, i8=None)), (dict(i2=0, i4=0, i8=0), dict(i2=0, i4=0, i8=0)), dict(i2=42, i4=123456, i8=9876543210), dict(i2=2 ** 15 - 1, i4=int(2 ** 31 - 1), i8=long(2 ** 63 - 1)), dict(d=None), (dict(d=''), dict(d=None)), dict(d=Decimal(0)), (dict(d=0), dict(d=Decimal(0))), dict(f4=None, f8=None), dict(f4=0, f8=0), (dict(f4='', f8=''), dict(f4=None, f8=None)), (dict(d=1234.5, f4=1234.5, f8=1234.5), dict(d=Decimal('1234.5'))), dict(d=Decimal('123.456789'), f4=12.375, f8=123.4921875), dict(d=Decimal('123456789.9876543212345678987654321')), dict(m=None), (dict(m=''), dict(m=None)), dict(m=Decimal('-1234.56')), (dict(m='-1234.56'), dict(m=Decimal('-1234.56'))), dict(m=Decimal('1234.56')), dict(m=Decimal('123456')), (dict(m='1234.56'), dict(m=Decimal('1234.56'))), (dict(m=1234.5), dict(m=Decimal('1234.5'))), (dict(m=-1234.5), dict(m=Decimal('-1234.5'))), (dict(m=123456), dict(m=Decimal('123456'))), (dict(m='1234567.89'), dict(m=Decimal('1234567.89'))), dict(b=None), (dict(b=''), dict(b=None)), dict(b='f'), dict(b='t'), (dict(b=0), dict(b='f')), (dict(b=1), dict(b='t')), (dict(b=False), dict(b='f')), (dict(b=True), dict(b='t')), (dict(b='0'), dict(b='f')), (dict(b='1'), dict(b='t')), (dict(b='n'), dict(b='f')), (dict(b='y'), dict(b='t')), (dict(b='no'), dict(b='f')), (dict(b='yes'), dict(b='t')), (dict(b='off'), dict(b='f')), (dict(b='on'), dict(b='t')), dict(v4=None, c4=None, t=None), (dict(v4='', c4='', t=''), dict(c4=' ' * 4)), dict(v4='1234', c4='1234', t='1234' * 10), 
dict(v4='abcd', c4='abcd', t='abcdefg'), (dict(v4='abc', c4='abc', t='abc'), dict(c4='abc ')), dict(ts=None), (dict(ts=''), dict(ts=None)), (dict(ts=0), dict(ts=None)), (dict(ts=False), dict(ts=None)), dict(ts='2012-12-21 00:00:00'), (dict(ts='2012-12-21'), dict(ts='2012-12-21 00:00:00')), dict(ts='2012-12-21 12:21:12'), dict(ts='2013-01-05 12:13:14'), dict(ts='current_timestamp')] for test in tests: if isinstance(test, dict): data = test change = {} else: data, change = test expect = data.copy() expect.update(change) if bool_on: b = expect.get('b') if b is not None: expect['b'] = b == 't' if decimal is not Decimal: d = expect.get('d') if d is not None: expect['d'] = decimal(d) m = expect.get('m') if m is not None: expect['m'] = decimal(m) self.assertEqual(insert(table, data), data) data = dict(item for item in data.items() if item[0] in expect) ts = expect.get('ts') if ts: if ts == 'current_timestamp': ts = data['ts'] self.assertIsInstance(ts, datetime) self.assertEqual(ts.strftime('%Y-%m-%d'), strftime('%Y-%m-%d')) else: ts = datetime.strptime(ts, '%Y-%m-%d %H:%M:%S') expect['ts'] = ts self.assertEqual(data, expect) data = query('select * from "%s"' % table).dictresult()[0] data = dict(item for item in data.items() if item[0] in expect) self.assertEqual(data, expect) query('delete from "%s"' % table) def testInsertWithOids(self): if not self.oids: self.skipTest("database does not support tables with oids") insert = self.db.insert query = self.db.query self.createTable('test_table', 'n int', oids=True) self.assertRaises(pg.ProgrammingError, insert, 'test_table', m=1) r = insert('test_table', n=1) self.assertIsInstance(r, dict) self.assertEqual(r['n'], 1) self.assertNotIn('oid', r) qoid = 'oid(test_table)' self.assertIn(qoid, r) oid = r[qoid] self.assertEqual(sorted(r.keys()), ['n', qoid]) r = insert('test_table', n=2, oid=oid) self.assertIsInstance(r, dict) self.assertEqual(r['n'], 2) self.assertIn(qoid, r) self.assertNotEqual(r[qoid], oid) self.assertNotIn('oid', r) r = insert('test_table', None, n=3) self.assertIsInstance(r, dict) self.assertEqual(r['n'], 3) s = r r = insert('test_table', r) self.assertIs(r, s) self.assertEqual(r['n'], 3) r = insert('test_table *', r) self.assertIs(r, s) self.assertEqual(r['n'], 3) r = insert('test_table', r, n=4) self.assertIs(r, s) self.assertEqual(r['n'], 4) self.assertNotIn('oid', r) self.assertIn(qoid, r) oid = r[qoid] r = insert('test_table', r, n=5, oid=oid) self.assertIs(r, s) self.assertEqual(r['n'], 5) self.assertIn(qoid, r) self.assertNotEqual(r[qoid], oid) self.assertNotIn('oid', r) r['oid'] = oid = r[qoid] r = insert('test_table', r, n=6) self.assertIs(r, s) self.assertEqual(r['n'], 6) self.assertIn(qoid, r) self.assertNotEqual(r[qoid], oid) self.assertNotIn('oid', r) q = 'select n from test_table order by 1 limit 9' r = ' '.join(str(row[0]) for row in query(q).getresult()) self.assertEqual(r, '1 2 3 3 3 4 5 6') query("truncate test_table") query("alter table test_table add unique (n)") r = insert('test_table', dict(n=7)) self.assertIsInstance(r, dict) self.assertEqual(r['n'], 7) self.assertRaises(pg.IntegrityError, insert, 'test_table', r) r['n'] = 6 self.assertRaises(pg.IntegrityError, insert, 'test_table', r, n=7) self.assertIsInstance(r, dict) self.assertEqual(r['n'], 7) r['n'] = 6 r = insert('test_table', r) self.assertIsInstance(r, dict) self.assertEqual(r['n'], 6) r = ' '.join(str(row[0]) for row in query(q).getresult()) self.assertEqual(r, '6 7') def testInsertWithQuotedNames(self): insert = self.db.insert query = self.db.query 
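        # Note: as the preceding insert tests show, db.insert() fills in
        # database-generated values and returns the very dict that was
        # passed in, e.g. (sketch):
        #
        #     row = dict(n=1)
        #     assert db.insert('test_table', row) is row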
        table = 'test table for insert()'
        self.createTable(table, '"Prime!" smallint primary key,'
                         ' "much space" integer, "Questions?" text')
        r = {'Prime!': 11, 'much space': 2002, 'Questions?': 'What?'}
        r = insert(table, r)
        self.assertIsInstance(r, dict)
        self.assertEqual(r['Prime!'], 11)
        self.assertEqual(r['much space'], 2002)
        self.assertEqual(r['Questions?'], 'What?')
        r = query('select * from "%s" limit 2' % table).dictresult()
        self.assertEqual(len(r), 1)
        r = r[0]
        self.assertEqual(r['Prime!'], 11)
        self.assertEqual(r['much space'], 2002)
        self.assertEqual(r['Questions?'], 'What?')

    def testInsertIntoView(self):
        insert = self.db.insert
        query = self.db.query
        query("truncate test")
        q = 'select * from test_view order by i4 limit 3'
        r = query(q).getresult()
        self.assertEqual(r, [])
        r = dict(i4=1234, v4='abcd')
        insert('test', r)
        self.assertIsNone(r['i2'])
        self.assertEqual(r['i4'], 1234)
        self.assertIsNone(r['i8'])
        self.assertEqual(r['v4'], 'abcd')
        self.assertIsNone(r['c4'])
        r = query(q).getresult()
        self.assertEqual(r, [(1234, 'abcd')])
        r = dict(i4=5678, v4='efgh')
        try:
            insert('test_view', r)
        except (pg.OperationalError, pg.NotSupportedError) as error:
            if self.db.server_version < 90300:
                # must set up rules in older PostgreSQL versions
                self.skipTest('database cannot insert into view')
            self.fail(str(error))
        self.assertNotIn('i2', r)
        self.assertEqual(r['i4'], 5678)
        self.assertNotIn('i8', r)
        self.assertEqual(r['v4'], 'efgh')
        self.assertNotIn('c4', r)
        r = query(q).getresult()
        self.assertEqual(r, [(1234, 'abcd'), (5678, 'efgh')])

    def testUpdate(self):
        update = self.db.update
        query = self.db.query
        self.assertRaises(pg.ProgrammingError, update,
                          'test', i2=2, i4=4, i8=8)
        table = 'update_test_table'
        self.createTable(table, 'n integer primary key, t text',
                         values=enumerate('xyz', start=1))
        self.assertRaises(pg.DatabaseError, self.db.get, table, 4)
        r = self.db.get(table, 2)
        r['t'] = 'u'
        s = update(table, r)
        self.assertEqual(s, r)
        q = 'select t from "%s" where n=2' % table
        r = query(q).getresult()[0][0]
        self.assertEqual(r, 'u')

    def testUpdateWithOids(self):
        if not self.oids:
            self.skipTest("database does not support tables with oids")
        update = self.db.update
        get = self.db.get
        query = self.db.query
        self.createTable('test_table', 'n int', oids=True, values=[1])
        s = get('test_table', 1, 'n')
        self.assertIsInstance(s, dict)
        self.assertEqual(s['n'], 1)
        s['n'] = 2
        r = update('test_table', s)
        self.assertIs(r, s)
        self.assertEqual(r['n'], 2)
        qoid = 'oid(test_table)'
        self.assertIn(qoid, r)
        self.assertNotIn('oid', r)
        self.assertEqual(sorted(r.keys()), ['n', qoid])
        r['n'] = 3
        oid = r.pop(qoid)
        r = update('test_table', r, oid=oid)
        self.assertIs(r, s)
        self.assertEqual(r['n'], 3)
        r.pop(qoid)
        self.assertRaises(pg.ProgrammingError, update, 'test_table', r)
        s = get('test_table', 3, 'n')
        self.assertIsInstance(s, dict)
        self.assertEqual(s['n'], 3)
        s.pop('n')
        r = update('test_table', s)
        oid = r.pop(qoid)
        self.assertEqual(r, {})
        q = "select n from test_table limit 2"
        r = query(q).getresult()
        self.assertEqual(r, [(3,)])
        query("insert into test_table values (1)")
        self.assertRaises(pg.ProgrammingError,
                          update, 'test_table', dict(oid=oid, n=4))
        r = update('test_table', dict(n=4), oid=oid)
        self.assertEqual(r['n'], 4)
        r = update('test_table *', dict(n=5), oid=oid)
        self.assertEqual(r['n'], 5)
        query("alter table test_table add column m int")
        query("alter table test_table add primary key (n)")
        self.assertIn('m', self.db.get_attnames('test_table', flush=True))
        self.assertEqual('n', self.db.pkey('test_table', flush=True))
        s = dict(n=1, m=4)
        r = update('test_table', s)
        self.assertIs(r, s)
        self.assertEqual(r['n'], 1)
        self.assertEqual(r['m'], 4)
        s = dict(m=7)
        r = update('test_table', s, n=5)
        self.assertIs(r, s)
        self.assertEqual(r['n'], 5)
        self.assertEqual(r['m'], 7)
        q = "select n, m from test_table order by 1 limit 3"
        r = query(q).getresult()
        self.assertEqual(r, [(1, 4), (5, 7)])
        s = dict(m=9, oid=oid)
        self.assertRaises(KeyError, update, 'test_table', s)
        r = update('test_table', s, oid=oid)
        self.assertIs(r, s)
        self.assertEqual(r['n'], 5)
        self.assertEqual(r['m'], 9)
        s = dict(n=1, m=3, oid=oid)
        r = update('test_table', s)
        self.assertIs(r, s)
        self.assertEqual(r['n'], 1)
        self.assertEqual(r['m'], 3)
        r = query(q).getresult()
        self.assertEqual(r, [(1, 3), (5, 9)])
        s.update(n=4, m=7)
        r = update('test_table', s, oid=oid)
        self.assertIs(r, s)
        self.assertEqual(r['n'], 4)
        self.assertEqual(r['m'], 7)
        r = query(q).getresult()
        self.assertEqual(r, [(1, 3), (4, 7)])

    def testUpdateWithoutOid(self):
        update = self.db.update
        query = self.db.query
        self.assertRaises(pg.ProgrammingError, update,
                          'test', i2=2, i4=4, i8=8)
        table = 'update_test_table'
        self.createTable(table, 'n integer primary key, t text',
                         oids=False, values=enumerate('xyz', start=1))
        r = self.db.get(table, 2)
        r['t'] = 'u'
        s = update(table, r)
        self.assertEqual(s, r)
        q = 'select t from "%s" where n=2' % table
        r = query(q).getresult()[0][0]
        self.assertEqual(r, 'u')

    def testUpdateWithCompositeKey(self):
        update = self.db.update
        query = self.db.query
        table = 'update_test_table_1'
        self.createTable(table, 'n integer primary key, t text',
                         values=enumerate('abc', start=1))
        self.assertRaises(KeyError, update, table, dict(t='b'))
        s = dict(n=2, t='d')
        r = update(table, s)
        self.assertIs(r, s)
        self.assertEqual(r['n'], 2)
        self.assertEqual(r['t'], 'd')
        q = 'select t from "%s" where n=2' % table
        r = query(q).getresult()[0][0]
        self.assertEqual(r, 'd')
        s.update(dict(n=4, t='e'))
        r = update(table, s)
        self.assertEqual(r['n'], 4)
        self.assertEqual(r['t'], 'e')
        q = 'select t from "%s" where n=2' % table
        r = query(q).getresult()[0][0]
        self.assertEqual(r, 'd')
        q = 'select t from "%s" where n=4' % table
        r = query(q).getresult()
        self.assertEqual(len(r), 0)
        query('drop table "%s"' % table)
        table = 'update_test_table_2'
        self.createTable(table,
                         'n integer, m integer, t text, primary key (n, m)',
                         values=[(n + 1, m + 1, chr(ord('a') + 2 * n + m))
                                 for n in range(3) for m in range(2)])
        self.assertRaises(KeyError, update, table, dict(n=2, t='b'))
        self.assertEqual(update(table, dict(n=2, m=2, t='x'))['t'], 'x')
        q = 'select t from "%s" where n=2 order by m' % table
        r = [r[0] for r in query(q).getresult()]
        self.assertEqual(r, ['c', 'x'])

    def testUpdateWithQuotedNames(self):
        update = self.db.update
        query = self.db.query
        table = 'test table for update()'
        self.createTable(table, '"Prime!" smallint primary key,'
                         ' "much space" integer, "Questions?" text',
                         values=[(13, 3003, 'Why!')])
        r = {'Prime!': 13, 'much space': 7007, 'Questions?': 'When?'}
        r = update(table, r)
        self.assertIsInstance(r, dict)
        self.assertEqual(r['Prime!'], 13)
        self.assertEqual(r['much space'], 7007)
        self.assertEqual(r['Questions?'], 'When?')
        r = query('select * from "%s" limit 2' % table).dictresult()
        self.assertEqual(len(r), 1)
        r = r[0]
        self.assertEqual(r['Prime!'], 13)
        self.assertEqual(r['much space'], 7007)
        self.assertEqual(r['Questions?'], 'When?')

    def testUpsert(self):
        upsert = self.db.upsert
        query = self.db.query
        self.assertRaises(pg.ProgrammingError, upsert,
                          'test', i2=2, i4=4, i8=8)
        table = 'upsert_test_table'
        self.createTable(table, 'n integer primary key, t text')
        s = dict(n=1, t='x')
        try:
            r = upsert(table, s)
        except pg.ProgrammingError as error:
            if self.db.server_version < 90500:
                self.skipTest('database does not support upsert')
            self.fail(str(error))
        self.assertIs(r, s)
        self.assertEqual(r['n'], 1)
        self.assertEqual(r['t'], 'x')
        s.update(n=2, t='y')
        r = upsert(table, s, **dict.fromkeys(s))
        self.assertIs(r, s)
        self.assertEqual(r['n'], 2)
        self.assertEqual(r['t'], 'y')
        q = 'select n, t from "%s" order by n limit 3' % table
        r = query(q).getresult()
        self.assertEqual(r, [(1, 'x'), (2, 'y')])
        s.update(t='z')
        r = upsert(table, s)
        self.assertIs(r, s)
        self.assertEqual(r['n'], 2)
        self.assertEqual(r['t'], 'z')
        r = query(q).getresult()
        self.assertEqual(r, [(1, 'x'), (2, 'z')])
        s.update(t='n')
        r = upsert(table, s, t=False)
        self.assertIs(r, s)
        self.assertEqual(r['n'], 2)
        self.assertEqual(r['t'], 'z')
        r = query(q).getresult()
        self.assertEqual(r, [(1, 'x'), (2, 'z')])
        s.update(t='y')
        r = upsert(table, s, t=True)
        self.assertIs(r, s)
        self.assertEqual(r['n'], 2)
        self.assertEqual(r['t'], 'y')
        r = query(q).getresult()
        self.assertEqual(r, [(1, 'x'), (2, 'y')])
        s.update(t='n')
        r = upsert(table, s, t="included.t || '2'")
        self.assertIs(r, s)
        self.assertEqual(r['n'], 2)
        self.assertEqual(r['t'], 'y2')
        r = query(q).getresult()
        self.assertEqual(r, [(1, 'x'), (2, 'y2')])
        s.update(t='y')
        r = upsert(table, s, t="excluded.t || '3'")
        self.assertIs(r, s)
        self.assertEqual(r['n'], 2)
        self.assertEqual(r['t'], 'y3')
        r = query(q).getresult()
        self.assertEqual(r, [(1, 'x'), (2, 'y3')])
        s.update(n=1, t='2')
        r = upsert(table, s, t="included.t || excluded.t")
        self.assertIs(r, s)
        self.assertEqual(r['n'], 1)
        self.assertEqual(r['t'], 'x2')
        r = query(q).getresult()
        self.assertEqual(r, [(1, 'x2'), (2, 'y3')])
        # not existing columns and oid parameter should be ignored
        s = dict(m=3, u='z')
        r = upsert(table, s, oid='invalid')
        self.assertIs(r, s)

    def testUpsertWithOids(self):
        if not self.oids:
            self.skipTest("database does not support tables with oids")
        upsert = self.db.upsert
        get = self.db.get
        query = self.db.query
        self.createTable('test_table', 'n int', oids=True, values=[1])
        self.assertRaises(pg.ProgrammingError,
                          upsert, 'test_table', dict(n=2))
        r = get('test_table', 1, 'n')
        self.assertIsInstance(r, dict)
        self.assertEqual(r['n'], 1)
        qoid = 'oid(test_table)'
        self.assertIn(qoid, r)
        self.assertNotIn('oid', r)
        oid = r[qoid]
        self.assertRaises(pg.ProgrammingError,
                          upsert, 'test_table', dict(n=2, oid=oid))
        query("alter table test_table add column m int")
        query("alter table test_table add primary key (n)")
        self.assertIn('m', self.db.get_attnames('test_table', flush=True))
        self.assertEqual('n', self.db.pkey('test_table', flush=True))
        s = dict(n=2)
        try:
            r = upsert('test_table', s)
        except pg.ProgrammingError as error:
            if self.db.server_version < 90500:
                self.skipTest('database does not support upsert')
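            # any other error on a server that does support upsert
            # is a real test failure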
            self.fail(str(error))
        self.assertIs(r, s)
        self.assertEqual(r['n'], 2)
        self.assertIsNone(r['m'])
        q = query("select n, m from test_table order by n limit 3")
        self.assertEqual(q.getresult(), [(1, None), (2, None)])
        r['oid'] = oid
        r = upsert('test_table', r)
        self.assertIs(r, s)
        self.assertEqual(r['n'], 2)
        self.assertIsNone(r['m'])
        self.assertIn(qoid, r)
        self.assertNotIn('oid', r)
        self.assertNotEqual(r[qoid], oid)
        r['m'] = 7
        r = upsert('test_table', r)
        self.assertIs(r, s)
        self.assertEqual(r['n'], 2)
        self.assertEqual(r['m'], 7)
        r.update(n=1, m=3)
        r = upsert('test_table', r)
        self.assertIs(r, s)
        self.assertEqual(r['n'], 1)
        self.assertEqual(r['m'], 3)
        q = query("select n, m from test_table order by n limit 3")
        self.assertEqual(q.getresult(), [(1, 3), (2, 7)])
        r = upsert('test_table', r, oid='invalid')
        self.assertIs(r, s)
        self.assertEqual(r['n'], 1)
        self.assertEqual(r['m'], 3)
        r['m'] = 5
        r = upsert('test_table', r, m=False)
        self.assertIs(r, s)
        self.assertEqual(r['n'], 1)
        self.assertEqual(r['m'], 3)
        r['m'] = 5
        r = upsert('test_table', r, m=True)
        self.assertIs(r, s)
        self.assertEqual(r['n'], 1)
        self.assertEqual(r['m'], 5)
        r.update(n=2, m=1)
        r = upsert('test_table', r, m='included.m')
        self.assertIs(r, s)
        self.assertEqual(r['n'], 2)
        self.assertEqual(r['m'], 7)
        r['m'] = 9
        r = upsert('test_table', r, m='excluded.m')
        self.assertIs(r, s)
        self.assertEqual(r['n'], 2)
        self.assertEqual(r['m'], 9)
        r['m'] = 8
        r = upsert('test_table *', r, m='included.m + 1')
        self.assertIs(r, s)
        self.assertEqual(r['n'], 2)
        self.assertEqual(r['m'], 10)
        q = query("select n, m from test_table order by n limit 3")
        self.assertEqual(q.getresult(), [(1, 5), (2, 10)])

    def testUpsertWithCompositeKey(self):
        upsert = self.db.upsert
        query = self.db.query
        table = 'upsert_test_table_2'
        self.createTable(
            table, 'n integer, m integer, t text, primary key (n, m)')
        s = dict(n=1, m=2, t='x')
        try:
            r = upsert(table, s)
        except pg.ProgrammingError as error:
            if self.db.server_version < 90500:
                self.skipTest('database does not support upsert')
            self.fail(str(error))
        self.assertIs(r, s)
        self.assertEqual(r['n'], 1)
        self.assertEqual(r['m'], 2)
        self.assertEqual(r['t'], 'x')
        s.update(m=3, t='y')
        r = upsert(table, s, **dict.fromkeys(s))
        self.assertIs(r, s)
        self.assertEqual(r['n'], 1)
        self.assertEqual(r['m'], 3)
        self.assertEqual(r['t'], 'y')
        q = 'select n, m, t from "%s" order by n, m limit 3' % table
        r = query(q).getresult()
        self.assertEqual(r, [(1, 2, 'x'), (1, 3, 'y')])
        s.update(t='z')
        r = upsert(table, s)
        self.assertIs(r, s)
        self.assertEqual(r['n'], 1)
        self.assertEqual(r['m'], 3)
        self.assertEqual(r['t'], 'z')
        r = query(q).getresult()
        self.assertEqual(r, [(1, 2, 'x'), (1, 3, 'z')])
        s.update(t='n')
        r = upsert(table, s, t=False)
        self.assertIs(r, s)
        self.assertEqual(r['n'], 1)
        self.assertEqual(r['m'], 3)
        self.assertEqual(r['t'], 'z')
        r = query(q).getresult()
        self.assertEqual(r, [(1, 2, 'x'), (1, 3, 'z')])
        s.update(t='n')
        r = upsert(table, s, t=True)
        self.assertIs(r, s)
        self.assertEqual(r['n'], 1)
        self.assertEqual(r['m'], 3)
        self.assertEqual(r['t'], 'n')
        r = query(q).getresult()
        self.assertEqual(r, [(1, 2, 'x'), (1, 3, 'n')])
        s.update(n=2, t='y')
        r = upsert(table, s, t="'z'")
        self.assertIs(r, s)
        self.assertEqual(r['n'], 2)
        self.assertEqual(r['m'], 3)
        self.assertEqual(r['t'], 'y')
        r = query(q).getresult()
        self.assertEqual(r, [(1, 2, 'x'), (1, 3, 'n'), (2, 3, 'y')])
        s.update(n=1, t='m')
        r = upsert(table, s, t='included.t || excluded.t')
        self.assertIs(r, s)
        self.assertEqual(r['n'], 1)
        self.assertEqual(r['m'], 3)
        self.assertEqual(r['t'], 'nm')
        r = query(q).getresult()
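        # the concatenated value 'nm' shows that 'included' referred to
        # the existing row and 'excluded' to the row proposed for
        # insertion (PostgreSQL's EXCLUDED pseudo-table) in the upsert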
        self.assertEqual(r, [(1, 2, 'x'), (1, 3, 'nm'), (2, 3, 'y')])

    def testUpsertWithQuotedNames(self):
        upsert = self.db.upsert
        query = self.db.query
        table = 'test table for upsert()'
        self.createTable(table, '"Prime!" smallint primary key,'
                         ' "much space" integer, "Questions?" text')
        s = {'Prime!': 31, 'much space': 9009, 'Questions?': 'Yes.'}
        try:
            r = upsert(table, s)
        except pg.ProgrammingError as error:
            if self.db.server_version < 90500:
                self.skipTest('database does not support upsert')
            self.fail(str(error))
        self.assertIs(r, s)
        self.assertEqual(r['Prime!'], 31)
        self.assertEqual(r['much space'], 9009)
        self.assertEqual(r['Questions?'], 'Yes.')
        q = 'select * from "%s" limit 2' % table
        r = query(q).getresult()
        self.assertEqual(r, [(31, 9009, 'Yes.')])
        s.update({'Questions?': 'No.'})
        r = upsert(table, s)
        self.assertIs(r, s)
        self.assertEqual(r['Prime!'], 31)
        self.assertEqual(r['much space'], 9009)
        self.assertEqual(r['Questions?'], 'No.')
        r = query(q).getresult()
        self.assertEqual(r, [(31, 9009, 'No.')])

    def testClear(self):
        clear = self.db.clear
        f = False if pg.get_bool() else 'f'
        r = clear('test')
        result = dict(
            i2=0, i4=0, i8=0, d=0, f4=0, f8=0, m=0, v4='', c4='', t='')
        self.assertEqual(r, result)
        table = 'clear_test_table'
        self.createTable(table,
                         'n integer, f float, b boolean, d date, t text')
        r = clear(table)
        result = dict(n=0, f=0, b=f, d='', t='')
        self.assertEqual(r, result)
        r['a'] = r['f'] = r['n'] = 1
        r['d'] = r['t'] = 'x'
        r['b'] = 't'
        r['oid'] = long(1)
        r = clear(table, r)
        result = dict(a=1, n=0, f=0, b=f, d='', t='', oid=long(1))
        self.assertEqual(r, result)

    def testClearWithQuotedNames(self):
        clear = self.db.clear
        table = 'test table for clear()'
        self.createTable(table, '"Prime!" smallint primary key,'
                         ' "much space" integer, "Questions?" text')
        r = clear(table)
        self.assertIsInstance(r, dict)
        self.assertEqual(r['Prime!'], 0)
        self.assertEqual(r['much space'], 0)
        self.assertEqual(r['Questions?'], '')

    def testDelete(self):
        delete = self.db.delete
        query = self.db.query
        self.assertRaises(pg.ProgrammingError, delete,
                          'test', dict(i2=2, i4=4, i8=8))
        table = 'delete_test_table'
        self.createTable(table, 'n integer primary key, t text',
                         oids=False, values=enumerate('xyz', start=1))
        self.assertRaises(pg.DatabaseError, self.db.get, table, 4)
        r = self.db.get(table, 1)
        s = delete(table, r)
        self.assertEqual(s, 1)
        r = self.db.get(table, 3)
        s = delete(table, r)
        self.assertEqual(s, 1)
        s = delete(table, r)
        self.assertEqual(s, 0)
        r = query('select * from "%s"' % table).dictresult()
        self.assertEqual(len(r), 1)
        r = r[0]
        result = {'n': 2, 't': 'y'}
        self.assertEqual(r, result)
        r = self.db.get(table, 2)
        s = delete(table, r)
        self.assertEqual(s, 1)
        s = delete(table, r)
        self.assertEqual(s, 0)
        self.assertRaises(pg.DatabaseError, self.db.get, table, 2)
        # not existing columns and oid parameter should be ignored
        r.update(m=3, u='z', oid='invalid')
        s = delete(table, r)
        self.assertEqual(s, 0)

    def testDeleteWithOids(self):
        if not self.oids:
            self.skipTest("database does not support tables with oids")
        delete = self.db.delete
        get = self.db.get
        query = self.db.query
        self.createTable('test_table', 'n int', oids=True,
                         values=range(1, 7))
        r = dict(n=3)
        self.assertRaises(pg.ProgrammingError, delete, 'test_table', r)
        s = get('test_table', 1, 'n')
        qoid = 'oid(test_table)'
        self.assertIn(qoid, s)
        r = delete('test_table', s)
        self.assertEqual(r, 1)
        r = delete('test_table', s)
        self.assertEqual(r, 0)
        q = "select min(n),count(n) from test_table"
        self.assertEqual(query(q).getresult()[0], (2, 5))
        oid = get('test_table', 2, 'n')[qoid]
        s = dict(oid=oid, n=2)
        self.assertRaises(pg.ProgrammingError, delete, 'test_table', s)
        r = delete('test_table', None, oid=oid)
        self.assertEqual(r, 1)
        r = delete('test_table', None, oid=oid)
        self.assertEqual(r, 0)
        self.assertEqual(query(q).getresult()[0], (3, 4))
        s = dict(oid=oid, n=2)
        oid = get('test_table', 3, 'n')[qoid]
        self.assertRaises(pg.ProgrammingError, delete, 'test_table', s)
        r = delete('test_table', s, oid=oid)
        self.assertEqual(r, 1)
        r = delete('test_table', s, oid=oid)
        self.assertEqual(r, 0)
        self.assertEqual(query(q).getresult()[0], (4, 3))
        s = get('test_table', 4, 'n')
        r = delete('test_table *', s)
        self.assertEqual(r, 1)
        r = delete('test_table *', s)
        self.assertEqual(r, 0)
        self.assertEqual(query(q).getresult()[0], (5, 2))
        oid = get('test_table', 5, 'n')[qoid]
        s = {qoid: oid, 'm': 4}
        r = delete('test_table', s, m=6)
        self.assertEqual(r, 1)
        r = delete('test_table *', s)
        self.assertEqual(r, 0)
        self.assertEqual(query(q).getresult()[0], (6, 1))
        query("alter table test_table add column m int")
        query("alter table test_table add primary key (n)")
        self.assertIn('m', self.db.get_attnames('test_table', flush=True))
        self.assertEqual('n', self.db.pkey('test_table', flush=True))
        for i in range(5):
            query("insert into test_table values (%d, %d)"
                  % (i + 1, i + 2))
        s = dict(m=2)
        self.assertRaises(KeyError, delete, 'test_table', s)
        s = dict(m=2, oid=oid)
        self.assertRaises(KeyError, delete, 'test_table', s)
        r = delete('test_table', dict(m=2), oid=oid)
        self.assertEqual(r, 0)
        oid = get('test_table', 1, 'n')[qoid]
        s = dict(oid=oid)
        self.assertRaises(KeyError, delete, 'test_table', s)
        r = delete('test_table', s, oid=oid)
        self.assertEqual(r, 1)
        r = delete('test_table', s, oid=oid)
        self.assertEqual(r, 0)
        self.assertEqual(query(q).getresult()[0], (2, 5))
        s = get('test_table', 2, 'n')
        del s['n']
        r = delete('test_table', s)
        self.assertEqual(r, 1)
        r = delete('test_table', s)
        self.assertEqual(r, 0)
        self.assertEqual(query(q).getresult()[0], (3, 4))
        r = delete('test_table', n=3)
        self.assertEqual(r, 1)
        r = delete('test_table', n=3)
        self.assertEqual(r, 0)
        self.assertEqual(query(q).getresult()[0], (4, 3))
        r = delete('test_table', None, n=4)
        self.assertEqual(r, 1)
        r = delete('test_table', None, n=4)
        self.assertEqual(r, 0)
        self.assertEqual(query(q).getresult()[0], (5, 2))
        s = dict(n=6)
        r = delete('test_table', s, n=5)
        self.assertEqual(r, 1)
        r = delete('test_table', s, n=5)
        self.assertEqual(r, 0)
        s = get('test_table', 6, 'n')
        self.assertEqual(s['n'], 6)
        s['n'] = 7
        r = delete('test_table', s)
        self.assertEqual(r, 1)
        self.assertEqual(query(q).getresult()[0], (None, 0))

    def testDeleteWithCompositeKey(self):
        query = self.db.query
        table = 'delete_test_table_1'
        self.createTable(table, 'n integer primary key, t text',
                         values=enumerate('abc', start=1))
        self.assertRaises(KeyError, self.db.delete, table, dict(t='b'))
        self.assertEqual(self.db.delete(table, dict(n=2)), 1)
        r = query('select t from "%s" where n=2' % table).getresult()
        self.assertEqual(r, [])
        self.assertEqual(self.db.delete(table, dict(n=2)), 0)
        r = query('select t from "%s" where n=3' % table).getresult()[0][0]
        self.assertEqual(r, 'c')
        table = 'delete_test_table_2'
        self.createTable(table,
                         'n integer, m integer, t text, primary key (n, m)',
                         values=[(n + 1, m + 1, chr(ord('a') + 2 * n + m))
                                 for n in range(3) for m in range(2)])
        self.assertRaises(KeyError, self.db.delete, table,
                          dict(n=2, t='b'))
        self.assertEqual(self.db.delete(table, dict(n=2, m=2)), 1)
        r = [r[0] for r in query('select t from "%s" where n=2'
                                 ' order by m' % table).getresult()]
        self.assertEqual(r, ['c'])
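        # deleting the same composite-key row again must report zero rows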
        self.assertEqual(self.db.delete(table, dict(n=2, m=2)), 0)
        r = [r[0] for r in query('select t from "%s" where n=3'
                                 ' order by m' % table).getresult()]
        self.assertEqual(r, ['e', 'f'])
        self.assertEqual(self.db.delete(table, dict(n=3, m=1)), 1)
        r = [r[0] for r in query('select t from "%s" where n=3'
                                 ' order by m' % table).getresult()]
        self.assertEqual(r, ['f'])

    def testDeleteWithQuotedNames(self):
        delete = self.db.delete
        query = self.db.query
        table = 'test table for delete()'
        self.createTable(table, '"Prime!" smallint primary key,'
                         ' "much space" integer, "Questions?" text',
                         values=[(19, 5005, 'Yes!')])
        r = {'Prime!': 17}
        r = delete(table, r)
        self.assertEqual(r, 0)
        r = query('select count(*) from "%s"' % table).getresult()
        self.assertEqual(r[0][0], 1)
        r = {'Prime!': 19}
        r = delete(table, r)
        self.assertEqual(r, 1)
        r = query('select count(*) from "%s"' % table).getresult()
        self.assertEqual(r[0][0], 0)

    def testDeleteReferenced(self):
        delete = self.db.delete
        query = self.db.query
        self.createTable('test_parent', 'n smallint primary key',
                         values=range(3))
        self.createTable('test_child',
                         'n smallint primary key references test_parent',
                         values=range(3))
        q = ("select (select count(*) from test_parent),"
             " (select count(*) from test_child)")
        self.assertEqual(query(q).getresult()[0], (3, 3))
        self.assertRaises(pg.IntegrityError,
                          delete, 'test_parent', None, n=2)
        self.assertRaises(pg.IntegrityError,
                          delete, 'test_parent *', None, n=2)
        r = delete('test_child', None, n=2)
        self.assertEqual(r, 1)
        self.assertEqual(query(q).getresult()[0], (3, 2))
        r = delete('test_parent', None, n=2)
        self.assertEqual(r, 1)
        self.assertEqual(query(q).getresult()[0], (2, 2))
        self.assertRaises(pg.IntegrityError,
                          delete, 'test_parent', dict(n=0))
        self.assertRaises(pg.IntegrityError,
                          delete, 'test_parent *', dict(n=0))
        r = delete('test_child', dict(n=0))
        self.assertEqual(r, 1)
        self.assertEqual(query(q).getresult()[0], (2, 1))
        r = delete('test_child', dict(n=0))
        self.assertEqual(r, 0)
        r = delete('test_parent', dict(n=0))
        self.assertEqual(r, 1)
        self.assertEqual(query(q).getresult()[0], (1, 1))
        r = delete('test_parent', None, n=0)
        self.assertEqual(r, 0)
        q = "select n from test_parent natural join test_child limit 2"
        self.assertEqual(query(q).getresult(), [(1,)])

    def testTempCrud(self):
        table = 'test_temp_table'
        self.createTable(table, "n int primary key, t varchar",
                         temporary=True)
        self.db.insert(table, dict(n=1, t='one'))
        self.db.insert(table, dict(n=2, t='too'))
        self.db.insert(table, dict(n=3, t='three'))
        r = self.db.get(table, 2)
        self.assertEqual(r['t'], 'too')
        self.db.update(table, dict(n=2, t='two'))
        r = self.db.get(table, 2)
        self.assertEqual(r['t'], 'two')
        self.db.delete(table, r)
        r = self.db.query('select n, t from %s order by 1'
                          % table).getresult()
        self.assertEqual(r, [(1, 'one'), (3, 'three')])

    def testTruncate(self):
        truncate = self.db.truncate
        self.assertRaises(TypeError, truncate, None)
        self.assertRaises(TypeError, truncate, 42)
        self.assertRaises(TypeError, truncate, dict(test_table=None))
        query = self.db.query
        self.createTable('test_table', 'n smallint',
                         temporary=False, values=[1] * 3)
        q = "select count(*) from test_table"
        r = query(q).getresult()[0][0]
        self.assertEqual(r, 3)
        truncate('test_table')
        r = query(q).getresult()[0][0]
        self.assertEqual(r, 0)
        for i in range(3):
            query("insert into test_table values (1)")
        r = query(q).getresult()[0][0]
        self.assertEqual(r, 3)
        truncate('public.test_table')
        r = query(q).getresult()[0][0]
        self.assertEqual(r, 0)
        self.createTable('test_table_2', 'n smallint', temporary=True)
        for t in (list, tuple, set):
            for i in range(3):
                query("insert into test_table values (1)")
                query("insert into test_table_2 values (2)")
            q = ("select (select count(*) from test_table),"
                 " (select count(*) from test_table_2)")
            r = query(q).getresult()[0]
            self.assertEqual(r, (3, 3))
            truncate(t(['test_table', 'test_table_2']))
            r = query(q).getresult()[0]
            self.assertEqual(r, (0, 0))

    def testTruncateRestart(self):
        truncate = self.db.truncate
        self.assertRaises(TypeError, truncate,
                          'test_table', restart='invalid')
        query = self.db.query
        self.createTable('test_table', 'n serial, t text')
        for n in range(3):
            query("insert into test_table (t) values ('test')")
        q = "select count(n), min(n), max(n) from test_table"
        r = query(q).getresult()[0]
        self.assertEqual(r, (3, 1, 3))
        truncate('test_table')
        r = query(q).getresult()[0]
        self.assertEqual(r, (0, None, None))
        for n in range(3):
            query("insert into test_table (t) values ('test')")
        r = query(q).getresult()[0]
        self.assertEqual(r, (3, 4, 6))
        truncate('test_table', restart=True)
        r = query(q).getresult()[0]
        self.assertEqual(r, (0, None, None))
        for n in range(3):
            query("insert into test_table (t) values ('test')")
        r = query(q).getresult()[0]
        self.assertEqual(r, (3, 1, 3))

    def testTruncateCascade(self):
        truncate = self.db.truncate
        self.assertRaises(TypeError, truncate,
                          'test_table', cascade='invalid')
        query = self.db.query
        self.createTable('test_parent', 'n smallint primary key',
                         values=range(3))
        self.createTable('test_child',
                         'n smallint primary key references test_parent (n)',
                         values=range(3))
        q = ("select (select count(*) from test_parent),"
             " (select count(*) from test_child)")
        r = query(q).getresult()[0]
        self.assertEqual(r, (3, 3))
        self.assertRaises(pg.NotSupportedError, truncate, 'test_parent')
        truncate(['test_parent', 'test_child'])
        r = query(q).getresult()[0]
        self.assertEqual(r, (0, 0))
        for n in range(3):
            query("insert into test_parent (n) values (%d)" % n)
            query("insert into test_child (n) values (%d)" % n)
        r = query(q).getresult()[0]
        self.assertEqual(r, (3, 3))
        truncate('test_parent', cascade=True)
        r = query(q).getresult()[0]
        self.assertEqual(r, (0, 0))
        for n in range(3):
            query("insert into test_parent (n) values (%d)" % n)
            query("insert into test_child (n) values (%d)" % n)
        r = query(q).getresult()[0]
        self.assertEqual(r, (3, 3))
        truncate('test_child')
        r = query(q).getresult()[0]
        self.assertEqual(r, (3, 0))
        self.assertRaises(pg.NotSupportedError, truncate, 'test_parent')
        truncate('test_parent', cascade=True)
        r = query(q).getresult()[0]
        self.assertEqual(r, (0, 0))

    def testTruncateOnly(self):
        truncate = self.db.truncate
        self.assertRaises(TypeError, truncate,
                          'test_table', only='invalid')
        query = self.db.query
        self.createTable('test_parent', 'n smallint')
        self.createTable('test_child',
                         'm smallint) inherits (test_parent')
        for n in range(3):
            query("insert into test_parent (n) values (1)")
            query("insert into test_child (n, m) values (2, 3)")
        q = ("select (select count(*) from test_parent),"
             " (select count(*) from test_child)")
        r = query(q).getresult()[0]
        self.assertEqual(r, (6, 3))
        truncate('test_parent')
        r = query(q).getresult()[0]
        self.assertEqual(r, (0, 0))
        for n in range(3):
            query("insert into test_parent (n) values (1)")
            query("insert into test_child (n, m) values (2, 3)")
        r = query(q).getresult()[0]
        self.assertEqual(r, (6, 3))
        truncate('test_parent*')
        r = query(q).getresult()[0]
        self.assertEqual(r, (0, 0))
        for n in range(3):
            query("insert into test_parent (n) values (1)")
            query("insert into test_child (n, m) values (2, 3)")
        r = query(q).getresult()[0]
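        # rows are back in both tables; with only=True, truncate should
        # affect just test_parent and leave the inheriting child alone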
        self.assertEqual(r, (6, 3))
        truncate('test_parent', only=True)
        r = query(q).getresult()[0]
        self.assertEqual(r, (3, 3))
        truncate('test_parent', only=False)
        r = query(q).getresult()[0]
        self.assertEqual(r, (0, 0))
        self.assertRaises(ValueError, truncate, 'test_parent*', only=True)
        truncate('test_parent*', only=False)
        self.createTable('test_parent_2', 'n smallint')
        self.createTable('test_child_2',
                         'm smallint) inherits (test_parent_2')
        for t in '', '_2':
            for n in range(3):
                query("insert into test_parent%s (n) values (1)" % t)
                query("insert into test_child%s (n, m) values (2, 3)" % t)
        q = ("select (select count(*) from test_parent),"
             " (select count(*) from test_child),"
             " (select count(*) from test_parent_2),"
             " (select count(*) from test_child_2)")
        r = query(q).getresult()[0]
        self.assertEqual(r, (6, 3, 6, 3))
        truncate(['test_parent', 'test_parent_2'], only=[False, True])
        r = query(q).getresult()[0]
        self.assertEqual(r, (0, 0, 3, 3))
        truncate(['test_parent', 'test_parent_2'], only=False)
        r = query(q).getresult()[0]
        self.assertEqual(r, (0, 0, 0, 0))
        self.assertRaises(ValueError, truncate,
                          ['test_parent*', 'test_child'],
                          only=[True, False])
        truncate(['test_parent*', 'test_child'], only=[False, True])

    def testTruncateQuoted(self):
        truncate = self.db.truncate
        query = self.db.query
        table = "test table for truncate()"
        self.createTable(table, 'n smallint',
                         temporary=False, values=[1] * 3)
        q = 'select count(*) from "%s"' % table
        r = query(q).getresult()[0][0]
        self.assertEqual(r, 3)
        truncate(table)
        r = query(q).getresult()[0][0]
        self.assertEqual(r, 0)
        for i in range(3):
            query('insert into "%s" values (1)' % table)
        r = query(q).getresult()[0][0]
        self.assertEqual(r, 3)
        truncate('public."%s"' % table)
        r = query(q).getresult()[0][0]
        self.assertEqual(r, 0)

    def testGetAsList(self):
        get_as_list = self.db.get_as_list
        self.assertRaises(TypeError, get_as_list)
        self.assertRaises(TypeError, get_as_list, None)
        query = self.db.query
        table = 'test_aslist'
        r = query('select 1 as colname').namedresult()[0]
        self.assertIsInstance(r, tuple)
        named = hasattr(r, 'colname')
        names = [(1, 'Homer'), (2, 'Marge'),
                 (3, 'Bart'), (4, 'Lisa'), (5, 'Maggie')]
        self.createTable(table, 'id smallint primary key, name varchar',
                         values=names)
        r = get_as_list(table)
        self.assertIsInstance(r, list)
        self.assertEqual(r, names)
        for t, n in zip(r, names):
            self.assertIsInstance(t, tuple)
            self.assertEqual(t, n)
            if named:
                self.assertEqual(t.id, n[0])
                self.assertEqual(t.name, n[1])
                self.assertEqual(t._asdict(), dict(id=n[0], name=n[1]))
        r = get_as_list(table, what='name')
        self.assertIsInstance(r, list)
        expected = sorted((row[1],) for row in names)
        self.assertEqual(r, expected)
        r = get_as_list(table, what='name, id')
        self.assertIsInstance(r, list)
        expected = sorted(tuple(reversed(row)) for row in names)
        self.assertEqual(r, expected)
        r = get_as_list(table, what=['name', 'id'])
        self.assertIsInstance(r, list)
        self.assertEqual(r, expected)
        r = get_as_list(table, where="name like 'Ba%'")
        self.assertIsInstance(r, list)
        self.assertEqual(r, names[2:3])
        r = get_as_list(table, what='name', where="name like 'Ma%'")
        self.assertIsInstance(r, list)
        self.assertEqual(r, [('Maggie',), ('Marge',)])
        r = get_as_list(table, what='name',
                        where=["name like 'Ma%'", "name like '%r%'"])
        self.assertIsInstance(r, list)
        self.assertEqual(r, [('Marge',)])
        r = get_as_list(table, what='name', order='id')
        self.assertIsInstance(r, list)
        expected = [(row[1],) for row in names]
        self.assertEqual(r, expected)
        r = get_as_list(table, what=['name'], order=['id'])
        self.assertIsInstance(r, list)
        self.assertEqual(r, expected)
        r = get_as_list(table, what=['id', 'name'], order=['id', 'name'])
        self.assertIsInstance(r, list)
        self.assertEqual(r, names)
        r = get_as_list(table, what='id * 2 as num', order='id desc')
        self.assertIsInstance(r, list)
        expected = [(n,) for n in range(10, 0, -2)]
        self.assertEqual(r, expected)
        r = get_as_list(table, limit=2)
        self.assertIsInstance(r, list)
        self.assertEqual(r, names[:2])
        r = get_as_list(table, offset=3)
        self.assertIsInstance(r, list)
        self.assertEqual(r, names[3:])
        r = get_as_list(table, limit=1, offset=2)
        self.assertIsInstance(r, list)
        self.assertEqual(r, names[2:3])
        r = get_as_list(table, scalar=True)
        self.assertIsInstance(r, list)
        self.assertEqual(r, list(range(1, 6)))
        r = get_as_list(table, what='name', scalar=True)
        self.assertIsInstance(r, list)
        expected = sorted(row[1] for row in names)
        self.assertEqual(r, expected)
        r = get_as_list(table, what='name', limit=1, scalar=True)
        self.assertIsInstance(r, list)
        self.assertEqual(r, expected[:1])
        query('alter table "%s" drop constraint "%s_pkey"'
              % (table, table))
        self.assertRaises(KeyError, self.db.pkey, table, flush=True)
        names.insert(1, (1, 'Snowball'))
        query('insert into "%s" values ($1, $2)' % table, (1, 'Snowball'))
        r = get_as_list(table)
        self.assertIsInstance(r, list)
        self.assertEqual(r, names)
        r = get_as_list(table, what='name', where='id=1', scalar=True)
        self.assertIsInstance(r, list)
        self.assertEqual(r, ['Homer', 'Snowball'])
        # test with unordered query
        r = get_as_list(table, order=False)
        self.assertIsInstance(r, list)
        self.assertEqual(set(r), set(names))
        # test with arbitrary from clause
        from_table = '(select lower(name) as n2 from "%s") as t2' % table
        r = get_as_list(from_table)
        self.assertIsInstance(r, list)
        r = set(row[0] for row in r)
        expected = set(row[1].lower() for row in names)
        self.assertEqual(r, expected)
        r = get_as_list(from_table, order='n2', scalar=True)
        self.assertIsInstance(r, list)
        self.assertEqual(r, sorted(expected))
        r = get_as_list(from_table, order='n2', limit=1)
        self.assertIsInstance(r, list)
        self.assertEqual(len(r), 1)
        t = r[0]
        self.assertIsInstance(t, tuple)
        if named:
            self.assertEqual(t.n2, 'bart')
            self.assertEqual(t._asdict(), dict(n2='bart'))
        else:
            self.assertEqual(t, ('bart',))

    def testGetAsDict(self):
        get_as_dict = self.db.get_as_dict
        self.assertRaises(TypeError, get_as_dict)
        self.assertRaises(TypeError, get_as_dict, None)
        # the test table has no primary key
        self.assertRaises(pg.ProgrammingError, get_as_dict, 'test')
        query = self.db.query
        table = 'test_asdict'
        r = query('select 1 as colname').namedresult()[0]
        self.assertIsInstance(r, tuple)
        named = hasattr(r, 'colname')
        colors = [(1, '#7cb9e8', 'Aero'), (2, '#b5a642', 'Brass'),
                  (3, '#b2ffff', 'Celeste'), (4, '#c19a6b', 'Desert')]
        self.createTable(
            table, 'id smallint primary key, rgb char(7), name varchar',
            values=colors)
        # keyname must be string, list or tuple
        self.assertRaises(KeyError, get_as_dict, table, 3)
        self.assertRaises(KeyError, get_as_dict, table, dict(id=None))
        # missing keyname in row
        self.assertRaises(KeyError, get_as_dict, table,
                          keyname='rgb', what='name')
        r = get_as_dict(table)
        self.assertIsInstance(r, OrderedDict)
        expected = OrderedDict((row[0], row[1:]) for row in colors)
        self.assertEqual(r, expected)
        for key in r:
            self.assertIsInstance(key, int)
            self.assertIn(key, expected)
            row = r[key]
            self.assertIsInstance(row, tuple)
            t = expected[key]
            self.assertEqual(row, t)
            if named:
                self.assertEqual(row.rgb, t[0])
                self.assertEqual(row.name, t[1])
                self.assertEqual(row._asdict(), dict(rgb=t[0], name=t[1]))
        if OrderedDict is not dict:  # Python > 2.6
            self.assertEqual(r.keys(), expected.keys())
        r = get_as_dict(table, keyname='rgb')
        self.assertIsInstance(r, OrderedDict)
        expected = OrderedDict(
            (row[1], (row[0], row[2]))
            for row in sorted(colors, key=itemgetter(1)))
        self.assertEqual(r, expected)
        for key in r:
            self.assertIsInstance(key, str)
            self.assertIn(key, expected)
            row = r[key]
            self.assertIsInstance(row, tuple)
            t = expected[key]
            self.assertEqual(row, t)
            if named:
                self.assertEqual(row.id, t[0])
                self.assertEqual(row.name, t[1])
                self.assertEqual(row._asdict(), dict(id=t[0], name=t[1]))
        if OrderedDict is not dict:  # Python > 2.6
            self.assertEqual(r.keys(), expected.keys())
        r = get_as_dict(table, keyname=['id', 'rgb'])
        self.assertIsInstance(r, OrderedDict)
        expected = OrderedDict((row[:2], row[2:]) for row in colors)
        self.assertEqual(r, expected)
        for key in r:
            self.assertIsInstance(key, tuple)
            self.assertIsInstance(key[0], int)
            self.assertIsInstance(key[1], str)
            if named:
                self.assertEqual(key, (key.id, key.rgb))
                self.assertEqual(key._fields, ('id', 'rgb'))
            row = r[key]
            self.assertIsInstance(row, tuple)
            self.assertIsInstance(row[0], str)
            t = expected[key]
            self.assertEqual(row, t)
            if named:
                self.assertEqual(row.name, t[0])
                self.assertEqual(row._asdict(), dict(name=t[0]))
        if OrderedDict is not dict:  # Python > 2.6
            self.assertEqual(r.keys(), expected.keys())
        r = get_as_dict(table, keyname=['id', 'rgb'], scalar=True)
        self.assertIsInstance(r, OrderedDict)
        expected = OrderedDict((row[:2], row[2]) for row in colors)
        self.assertEqual(r, expected)
        for key in r:
            self.assertIsInstance(key, tuple)
            row = r[key]
            self.assertIsInstance(row, str)
            t = expected[key]
            self.assertEqual(row, t)
        if OrderedDict is not dict:  # Python > 2.6
            self.assertEqual(r.keys(), expected.keys())
        r = get_as_dict(table, keyname='rgb',
                        what=['rgb', 'name'], scalar=True)
        self.assertIsInstance(r, OrderedDict)
        expected = OrderedDict(
            (row[1], row[2])
            for row in sorted(colors, key=itemgetter(1)))
        self.assertEqual(r, expected)
        for key in r:
            self.assertIsInstance(key, str)
            row = r[key]
            self.assertIsInstance(row, str)
            t = expected[key]
            self.assertEqual(row, t)
        if OrderedDict is not dict:  # Python > 2.6
            self.assertEqual(r.keys(), expected.keys())
        r = get_as_dict(table, what='id, name',
                        where="rgb like '#b%'", scalar=True)
        self.assertIsInstance(r, OrderedDict)
        expected = OrderedDict((row[0], row[2]) for row in colors[1:3])
        self.assertEqual(r, expected)
        for key in r:
            self.assertIsInstance(key, int)
            row = r[key]
            self.assertIsInstance(row, str)
            t = expected[key]
            self.assertEqual(row, t)
        if OrderedDict is not dict:  # Python > 2.6
            self.assertEqual(r.keys(), expected.keys())
        expected = r
        r = get_as_dict(table, what=['name', 'id'],
                        where=['id > 1', 'id < 4', "rgb like '#b%'",
                               "name not like 'A%'", "name not like '%t'"],
                        scalar=True)
        self.assertEqual(r, expected)
        r = get_as_dict(table, what='name, id',
                        limit=2, offset=1, scalar=True)
        self.assertEqual(r, expected)
        r = get_as_dict(table, keyname=('id',), what=('name', 'id'),
                        where=('id > 1', 'id < 4'),
                        order=('id',), scalar=True)
        self.assertEqual(r, expected)
        r = get_as_dict(table, limit=1)
        self.assertEqual(len(r), 1)
        self.assertEqual(r[1][1], 'Aero')
        r = get_as_dict(table, offset=3)
        self.assertEqual(len(r), 1)
        self.assertEqual(r[4][1], 'Desert')
        r = get_as_dict(table, order='id desc')
        expected = OrderedDict((row[0], row[1:])
                               for row in reversed(colors))
        self.assertEqual(r, expected)
        r = get_as_dict(table, where='id > 5')
        self.assertIsInstance(r, OrderedDict)
        self.assertEqual(len(r), 0)
        # test with unordered query
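        # (order=False skips the default ordering by the key columns,
        # so the result may be a plain dict instead of an OrderedDict)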
        expected = dict((row[0], row[1:]) for row in colors)
        r = get_as_dict(table, order=False)
        self.assertIsInstance(r, dict)
        self.assertEqual(r, expected)
        if dict is not OrderedDict:  # Python > 2.6
            self.assertNotIsInstance(r, OrderedDict)
        # test with arbitrary from clause
        from_table = '(select id, lower(name) as n2 from "%s") as t2' % table
        # primary key must be passed explicitly in this case
        self.assertRaises(pg.ProgrammingError, get_as_dict, from_table)
        r = get_as_dict(from_table, 'id')
        self.assertIsInstance(r, OrderedDict)
        expected = OrderedDict((row[0], (row[2].lower(),))
                               for row in colors)
        self.assertEqual(r, expected)
        # test without a primary key
        query('alter table "%s" drop constraint "%s_pkey"'
              % (table, table))
        self.assertRaises(KeyError, self.db.pkey, table, flush=True)
        self.assertRaises(pg.ProgrammingError, get_as_dict, table)
        r = get_as_dict(table, keyname='id')
        expected = OrderedDict((row[0], row[1:]) for row in colors)
        self.assertIsInstance(r, dict)
        self.assertEqual(r, expected)
        r = (1, '#007fff', 'Azure')
        query('insert into "%s" values ($1, $2, $3)' % table, r)
        # the last entry will win
        expected[1] = r[1:]
        r = get_as_dict(table, keyname='id')
        self.assertEqual(r, expected)

    def testTransaction(self):
        query = self.db.query
        self.createTable('test_table', 'n integer', temporary=False)
        self.db.begin()
        query("insert into test_table values (1)")
        query("insert into test_table values (2)")
        self.db.commit()
        self.db.begin()
        query("insert into test_table values (3)")
        query("insert into test_table values (4)")
        self.db.rollback()
        self.db.begin()
        query("insert into test_table values (5)")
        self.db.savepoint('before6')
        query("insert into test_table values (6)")
        self.db.rollback('before6')
        query("insert into test_table values (7)")
        self.db.commit()
        self.db.begin()
        self.db.savepoint('before8')
        query("insert into test_table values (8)")
        self.db.release('before8')
        self.assertRaises(pg.InternalError, self.db.rollback, 'before8')
        self.db.commit()
        self.db.start()
        query("insert into test_table values (9)")
        self.db.end()
        r = [r[0] for r in query(
            "select * from test_table order by 1").getresult()]
        self.assertEqual(r, [1, 2, 5, 7, 9])
        self.db.begin(mode='read only')
        self.assertRaises(pg.InternalError,
                          query, "insert into test_table values (0)")
        self.db.rollback()
        self.db.start(mode='Read Only')
        self.assertRaises(pg.InternalError,
                          query, "insert into test_table values (0)")
        self.db.abort()

    def testTransactionAliases(self):
        self.assertEqual(self.db.begin, self.db.start)
        self.assertEqual(self.db.commit, self.db.end)
        self.assertEqual(self.db.rollback, self.db.abort)

    def testContextManager(self):
        query = self.db.query
        self.createTable('test_table', 'n integer check(n>0)')
        with self.db:
            query("insert into test_table values (1)")
            query("insert into test_table values (2)")
        try:
            with self.db:
                query("insert into test_table values (3)")
                query("insert into test_table values (4)")
                raise ValueError('test transaction should rollback')
        except ValueError as error:
            self.assertEqual(str(error),
                             'test transaction should rollback')
        with self.db:
            query("insert into test_table values (5)")
        try:
            with self.db:
                query("insert into test_table values (6)")
                query("insert into test_table values (-1)")
        except pg.IntegrityError as error:
            self.assertTrue('check' in str(error))
        with self.db:
            query("insert into test_table values (7)")
        r = [r[0] for r in query(
            "select * from test_table order by 1").getresult()]
        self.assertEqual(r, [1, 2, 5, 7])

    def testBytea(self):
        query = self.db.query
        self.createTable('bytea_test', 'n smallint primary key, data bytea')
        s = b"It's all \\ kinds \x00 of\r nasty \xff stuff!\n"
        r = self.db.escape_bytea(s)
        query('insert into bytea_test values(3, $1)', (r,))
        r = query('select * from bytea_test where n=3').getresult()
        self.assertEqual(len(r), 1)
        r = r[0]
        self.assertEqual(len(r), 2)
        self.assertEqual(r[0], 3)
        r = r[1]
        if pg.get_bytea_escaped():
            self.assertNotEqual(r, s)
            r = pg.unescape_bytea(r)
        self.assertIsInstance(r, bytes)
        self.assertEqual(r, s)

    def testInsertUpdateGetBytea(self):
        query = self.db.query
        unescape = pg.unescape_bytea if pg.get_bytea_escaped() else None
        self.createTable('bytea_test', 'n smallint primary key, data bytea')
        # insert null value
        r = self.db.insert('bytea_test', n=0, data=None)
        self.assertIsInstance(r, dict)
        self.assertIn('n', r)
        self.assertEqual(r['n'], 0)
        self.assertIn('data', r)
        self.assertIsNone(r['data'])
        s = b'None'
        r = self.db.update('bytea_test', n=0, data=s)
        self.assertIsInstance(r, dict)
        self.assertIn('n', r)
        self.assertEqual(r['n'], 0)
        self.assertIn('data', r)
        r = r['data']
        if unescape:
            self.assertNotEqual(r, s)
            r = unescape(r)
        self.assertIsInstance(r, bytes)
        self.assertEqual(r, s)
        r = self.db.update('bytea_test', n=0, data=None)
        self.assertIsNone(r['data'])
        # insert as bytes
        s = b"It's all \\ kinds \x00 of\r nasty \xff stuff!\n"
        r = self.db.insert('bytea_test', n=5, data=s)
        self.assertIsInstance(r, dict)
        self.assertIn('n', r)
        self.assertEqual(r['n'], 5)
        self.assertIn('data', r)
        r = r['data']
        if unescape:
            self.assertNotEqual(r, s)
            r = unescape(r)
        self.assertIsInstance(r, bytes)
        self.assertEqual(r, s)
        # update as bytes
        s += b"and now even more \x00 nasty \t stuff!\f"
        r = self.db.update('bytea_test', n=5, data=s)
        self.assertIsInstance(r, dict)
        self.assertIn('n', r)
        self.assertEqual(r['n'], 5)
        self.assertIn('data', r)
        r = r['data']
        if unescape:
            self.assertNotEqual(r, s)
            r = unescape(r)
        self.assertIsInstance(r, bytes)
        self.assertEqual(r, s)
        r = query('select * from bytea_test where n=5').getresult()
        self.assertEqual(len(r), 1)
        r = r[0]
        self.assertEqual(len(r), 2)
        self.assertEqual(r[0], 5)
        r = r[1]
        if unescape:
            self.assertNotEqual(r, s)
            r = unescape(r)
        self.assertIsInstance(r, bytes)
        self.assertEqual(r, s)
        r = self.db.get('bytea_test', dict(n=5))
        self.assertIsInstance(r, dict)
        self.assertIn('n', r)
        self.assertEqual(r['n'], 5)
        self.assertIn('data', r)
        r = r['data']
        if unescape:
            self.assertNotEqual(r, s)
            r = pg.unescape_bytea(r)
        self.assertIsInstance(r, bytes)
        self.assertEqual(r, s)

    def testUpsertBytea(self):
        self.createTable('bytea_test', 'n smallint primary key, data bytea')
        s = b"It's all \\ kinds \x00 of\r nasty \xff stuff!\n"
        r = dict(n=7, data=s)
        try:
            r = self.db.upsert('bytea_test', r)
        except pg.ProgrammingError as error:
            if self.db.server_version < 90500:
                self.skipTest('database does not support upsert')
            self.fail(str(error))
        self.assertIsInstance(r, dict)
        self.assertIn('n', r)
        self.assertEqual(r['n'], 7)
        self.assertIn('data', r)
        if pg.get_bytea_escaped():
            self.assertNotEqual(r['data'], s)
            r['data'] = pg.unescape_bytea(r['data'])
        self.assertIsInstance(r['data'], bytes)
        self.assertEqual(r['data'], s)
        r['data'] = None
        r = self.db.upsert('bytea_test', r)
        self.assertIsInstance(r, dict)
        self.assertIn('n', r)
        self.assertEqual(r['n'], 7)
        self.assertIn('data', r)
        self.assertIsNone(r['data'])

    def testInsertGetJson(self):
        try:
            self.createTable('json_test',
                             'n smallint primary key, data json')
        except pg.ProgrammingError as error:
            if self.db.server_version < 90200:
                self.skipTest('database does not support json')
            self.fail(str(error))
        jsondecode = pg.get_jsondecode()
        # insert null value
        r = self.db.insert('json_test', n=0, data=None)
        self.assertIsInstance(r, dict)
        self.assertIn('n', r)
        self.assertEqual(r['n'], 0)
        self.assertIn('data', r)
        self.assertIsNone(r['data'])
        r = self.db.get('json_test', 0)
        self.assertIsInstance(r, dict)
        self.assertIn('n', r)
        self.assertEqual(r['n'], 0)
        self.assertIn('data', r)
        self.assertIsNone(r['data'])
        # insert JSON object
        data = {
            "id": 1, "name": "Foo", "price": 1234.5, "new": True,
            "note": None, "tags": ["Bar", "Eek"],
            "stock": {"warehouse": 300, "retail": 20}}
        r = self.db.insert('json_test', n=1, data=data)
        self.assertIsInstance(r, dict)
        self.assertIn('n', r)
        self.assertEqual(r['n'], 1)
        self.assertIn('data', r)
        r = r['data']
        if jsondecode is None:
            self.assertIsInstance(r, str)
            r = json.loads(r)
        self.assertIsInstance(r, dict)
        self.assertEqual(r, data)
        self.assertIsInstance(r['id'], int)
        self.assertIsInstance(r['name'], unicode)
        self.assertIsInstance(r['price'], float)
        self.assertIsInstance(r['new'], bool)
        self.assertIsInstance(r['tags'], list)
        self.assertIsInstance(r['stock'], dict)
        r = self.db.get('json_test', 1)
        self.assertIsInstance(r, dict)
        self.assertIn('n', r)
        self.assertEqual(r['n'], 1)
        self.assertIn('data', r)
        r = r['data']
        if jsondecode is None:
            self.assertIsInstance(r, str)
            r = json.loads(r)
        self.assertIsInstance(r, dict)
        self.assertEqual(r, data)
        self.assertIsInstance(r['id'], int)
        self.assertIsInstance(r['name'], unicode)
        self.assertIsInstance(r['price'], float)
        self.assertIsInstance(r['new'], bool)
        self.assertIsInstance(r['tags'], list)
        self.assertIsInstance(r['stock'], dict)
        # insert JSON object as text
        self.db.insert('json_test', n=2, data=json.dumps(data))
        q = "select data from json_test where n in (1, 2) order by n"
        r = self.db.query(q).getresult()
        self.assertEqual(len(r), 2)
        self.assertIsInstance(r[0][0], str if jsondecode is None else dict)
        self.assertEqual(r[0][0], r[1][0])

    def testInsertGetJsonb(self):
        try:
            self.createTable('jsonb_test',
                             'n smallint primary key, data jsonb')
        except pg.ProgrammingError as error:
            if self.db.server_version < 90400:
                self.skipTest('database does not support jsonb')
            self.fail(str(error))
        jsondecode = pg.get_jsondecode()
        # insert null value
        r = self.db.insert('jsonb_test', n=0, data=None)
        self.assertIsInstance(r, dict)
        self.assertIn('n', r)
        self.assertEqual(r['n'], 0)
        self.assertIn('data', r)
        self.assertIsNone(r['data'])
        r = self.db.get('jsonb_test', 0)
        self.assertIsInstance(r, dict)
        self.assertIn('n', r)
        self.assertEqual(r['n'], 0)
        self.assertIn('data', r)
        self.assertIsNone(r['data'])
        # insert JSON object
        data = {
            "id": 1, "name": "Foo", "price": 1234.5, "new": True,
            "note": None, "tags": ["Bar", "Eek"],
            "stock": {"warehouse": 300, "retail": 20}}
        r = self.db.insert('jsonb_test', n=1, data=data)
        self.assertIsInstance(r, dict)
        self.assertIn('n', r)
        self.assertEqual(r['n'], 1)
        self.assertIn('data', r)
        r = r['data']
        if jsondecode is None:
            self.assertIsInstance(r, str)
            r = json.loads(r)
        self.assertIsInstance(r, dict)
        self.assertEqual(r, data)
        self.assertIsInstance(r['id'], int)
        self.assertIsInstance(r['name'], unicode)
        self.assertIsInstance(r['price'], float)
        self.assertIsInstance(r['new'], bool)
        self.assertIsInstance(r['tags'], list)
        self.assertIsInstance(r['stock'], dict)
        r = self.db.get('jsonb_test', 1)
        self.assertIsInstance(r, dict)
        self.assertIn('n', r)
        self.assertEqual(r['n'], 1)
        self.assertIn('data', r)
        r = r['data']
        if jsondecode is None:
            self.assertIsInstance(r, str)
            r = json.loads(r)
        self.assertIsInstance(r, dict)
        self.assertEqual(r, data)
        self.assertIsInstance(r['id'], int)
        self.assertIsInstance(r['name'], unicode)
        self.assertIsInstance(r['price'], float)
        self.assertIsInstance(r['new'], bool)
        self.assertIsInstance(r['tags'], list)
        self.assertIsInstance(r['stock'], dict)

    def testArray(self):
        returns_arrays = pg.get_array()
        self.createTable(
            'arraytest',
            'id smallint, i2 smallint[], i4 integer[], i8 bigint[],'
            ' d numeric[], f4 real[], f8 double precision[], m money[],'
            ' b bool[], v4 varchar(4)[], c4 char(4)[], t text[]')
        r = self.db.get_attnames('arraytest')
        if self.regtypes:
            self.assertEqual(r, dict(
                id='smallint', i2='smallint[]', i4='integer[]',
                i8='bigint[]', d='numeric[]', f4='real[]',
                f8='double precision[]', m='money[]', b='boolean[]',
                v4='character varying[]', c4='character[]', t='text[]'))
        else:
            self.assertEqual(r, dict(
                id='int', i2='int[]', i4='int[]', i8='int[]',
                d='num[]', f4='float[]', f8='float[]', m='money[]',
                b='bool[]', v4='text[]', c4='text[]', t='text[]'))
        decimal = pg.get_decimal()
        if decimal is Decimal:
            long_decimal = decimal('123456789.123456789')
            odd_money = decimal('1234567891234567.89')
        else:
            long_decimal = decimal('12345671234.5')
            odd_money = decimal('1234567123.25')
        t, f = (True, False) if pg.get_bool() else ('t', 'f')
        data = dict(
            id=42,
            i2=[42, 1234, None, 0, -1],
            i4=[42, 123456789, None, 0, 1, -1],
            i8=[long(42), long(123456789123456789), None,
                long(0), long(1), long(-1)],
            d=[decimal(42), long_decimal, None,
               decimal(0), decimal(1), decimal(-1), -long_decimal],
            f4=[42.0, 1234.5, None, 0.0, 1.0, -1.0,
                float('inf'), float('-inf')],
            f8=[42.0, 12345671234.5, None, 0.0, 1.0, -1.0,
                float('inf'), float('-inf')],
            m=[decimal('42.00'), odd_money, None,
               decimal('0.00'), decimal('1.00'), decimal('-1.00'),
               -odd_money],
            b=[t, f, t, None, f, t, None, None, t],
            v4=['abc', '"Hi"', '', None],
            c4=['abc ', '"Hi"', '    ', None],
            t=['abc', 'Hello, World!', '"Hello, World!"', '', None])
        r = data.copy()
        self.db.insert('arraytest', r)
        if returns_arrays:
            self.assertEqual(r, data)
        else:
            self.assertEqual(r['i4'], '{42,123456789,NULL,0,1,-1}')
        self.db.insert('arraytest', r)
        r = self.db.get('arraytest', 42, 'id')
        if returns_arrays:
            self.assertEqual(r, data)
        else:
            self.assertEqual(r['i4'], '{42,123456789,NULL,0,1,-1}')
        r = self.db.query('select * from arraytest limit 1').dictresult()[0]
        if returns_arrays:
            self.assertEqual(r, data)
        else:
            self.assertEqual(r['i4'], '{42,123456789,NULL,0,1,-1}')

    def testArrayLiteral(self):
        insert = self.db.insert
        returns_arrays = pg.get_array()
        self.createTable('arraytest', 'i int[], t text[]')
        r = dict(i=[1, 2, 3], t=['a', 'b', 'c'])
        insert('arraytest', r)
        if returns_arrays:
            self.assertEqual(r['i'], [1, 2, 3])
            self.assertEqual(r['t'], ['a', 'b', 'c'])
        else:
            self.assertEqual(r['i'], '{1,2,3}')
            self.assertEqual(r['t'], '{a,b,c}')
        r = dict(i='{1,2,3}', t='{a,b,c}')
        self.db.insert('arraytest', r)
        if returns_arrays:
            self.assertEqual(r['i'], [1, 2, 3])
            self.assertEqual(r['t'], ['a', 'b', 'c'])
        else:
            self.assertEqual(r['i'], '{1,2,3}')
            self.assertEqual(r['t'], '{a,b,c}')
        L = pg.Literal
        r = dict(i=L("ARRAY[1, 2, 3]"), t=L("ARRAY['a', 'b', 'c']"))
        self.db.insert('arraytest', r)
        if returns_arrays:
            self.assertEqual(r['i'], [1, 2, 3])
            self.assertEqual(r['t'], ['a', 'b', 'c'])
        else:
            self.assertEqual(r['i'], '{1,2,3}')
            self.assertEqual(r['t'], '{a,b,c}')
        r = dict(i="1, 2, 3", t="'a', 'b', 'c'")
        self.assertRaises(pg.DataError, self.db.insert, 'arraytest', r)

    def testArrayOfIds(self):
        array_on = pg.get_array()
        self.createTable(
            'arraytest', 'i serial primary key, c cid[], o oid[], x xid[]')
        r = self.db.get_attnames('arraytest')
        if self.regtypes:
            self.assertEqual(r, dict(
                i='integer', c='cid[]', o='oid[]', x='xid[]'))
        else:
            self.assertEqual(r, dict(
                i='int', c='int[]', o='int[]', x='int[]'))
        data = dict(i=1, c=[11, 12, 13], o=[21, 22, 23], x=[31, 32, 33])
        r = data.copy()
        self.db.insert('arraytest', r)
        if array_on:
            self.assertEqual(r, data)
        else:
            self.assertEqual(r['o'], '{21,22,23}')
        self.db.get('arraytest', r)
        if array_on:
            self.assertEqual(r, data)
        else:
            self.assertEqual(r['o'], '{21,22,23}')

    def testArrayOfText(self):
        array_on = pg.get_array()
        self.createTable('arraytest', 'id serial primary key, data text[]')
        r = self.db.get_attnames('arraytest')
        self.assertEqual(r['data'], 'text[]')
        data = ['Hello, World!', '', None, '{a,b,c}', '"Hi!"',
                'null', 'NULL', 'Null', 'nulL',
                "It's all \\ kinds of\r nasty stuff!\n"]
        r = dict(data=data)
        self.db.insert('arraytest', r)
        if not array_on:
            r['data'] = pg.cast_array(r['data'])
        self.assertEqual(r['data'], data)
        self.assertIsInstance(r['data'][1], str)
        self.assertIsNone(r['data'][2])
        r['data'] = None
        self.db.get('arraytest', r)
        if not array_on:
            r['data'] = pg.cast_array(r['data'])
        self.assertEqual(r['data'], data)
        self.assertIsInstance(r['data'][1], str)
        self.assertIsNone(r['data'][2])

    def testArrayOfBytea(self):
        array_on = pg.get_array()
        bytea_escaped = pg.get_bytea_escaped()
        self.createTable('arraytest', 'id serial primary key, data bytea[]')
        r = self.db.get_attnames('arraytest')
        self.assertEqual(r['data'], 'bytea[]')
        data = [b'Hello, World!', b'', None, b'{a,b,c}', b'"Hi!"',
                b"It's all \\ kinds \x00 of\r nasty \xff stuff!\n"]
        r = dict(data=data)
        self.db.insert('arraytest', r)
        if array_on:
            self.assertIsInstance(r['data'], list)
        if array_on and not bytea_escaped:
            self.assertEqual(r['data'], data)
            self.assertIsInstance(r['data'][1], bytes)
            self.assertIsNone(r['data'][2])
        else:
            self.assertNotEqual(r['data'], data)
        r['data'] = None
        self.db.get('arraytest', r)
        if array_on:
            self.assertIsInstance(r['data'], list)
        if array_on and not bytea_escaped:
            self.assertEqual(r['data'], data)
            self.assertIsInstance(r['data'][1], bytes)
            self.assertIsNone(r['data'][2])
        else:
            self.assertNotEqual(r['data'], data)

    def testArrayOfJson(self):
        try:
            self.createTable(
                'arraytest', 'id serial primary key, data json[]')
        except pg.ProgrammingError as error:
            if self.db.server_version < 90200:
                self.skipTest('database does not support json')
            self.fail(str(error))
        r = self.db.get_attnames('arraytest')
        self.assertEqual(r['data'], 'json[]')
        data = [dict(id=815, name='John Doe'),
                dict(id=816, name='Jane Roe')]
        array_on = pg.get_array()
        jsondecode = pg.get_jsondecode()
        r = dict(data=data)
        self.db.insert('arraytest', r)
        if not array_on:
            r['data'] = pg.cast_array(r['data'], jsondecode)
        if jsondecode is None:
            r['data'] = [json.loads(d) for d in r['data']]
        self.assertEqual(r['data'], data)
        r['data'] = None
        self.db.get('arraytest', r)
        if not array_on:
            r['data'] = pg.cast_array(r['data'], jsondecode)
        if jsondecode is None:
            r['data'] = [json.loads(d) for d in r['data']]
        self.assertEqual(r['data'], data)
        r = dict(data=[json.dumps(d) for d in data])
        self.db.insert('arraytest', r)
        if not array_on:
            r['data'] = pg.cast_array(r['data'], jsondecode)
        if jsondecode is None:
            r['data'] = [json.loads(d) for d in r['data']]
        self.assertEqual(r['data'], data)
        r['data'] = None
        self.db.get('arraytest', r)
        # insert empty json values
        r = dict(data=['', None])
        self.db.insert('arraytest', r)
        r = r['data']
        if array_on:
            self.assertIsInstance(r, list)
            self.assertEqual(len(r), 2)
            self.assertIsNone(r[0])
            self.assertIsNone(r[1])
        else:
            self.assertEqual(r, '{NULL,NULL}')

    def testArrayOfJsonb(self):
        try:
            self.createTable(
                'arraytest', 'id serial primary key, data jsonb[]')
        except pg.ProgrammingError as error:
            if self.db.server_version < 90400:
                self.skipTest('database does not support jsonb')
            self.fail(str(error))
        r = self.db.get_attnames('arraytest')
        self.assertEqual(r['data'], 'jsonb[]' if self.regtypes else 'json[]')
        data = [dict(id=815, name='John Doe'),
                dict(id=816, name='Jane Roe')]
        array_on = pg.get_array()
        jsondecode = pg.get_jsondecode()
        r = dict(data=data)
        self.db.insert('arraytest', r)
        if not array_on:
            r['data'] = pg.cast_array(r['data'], jsondecode)
        if jsondecode is None:
            r['data'] = [json.loads(d) for d in r['data']]
        self.assertEqual(r['data'], data)
        r['data'] = None
        self.db.get('arraytest', r)
        if not array_on:
            r['data'] = pg.cast_array(r['data'], jsondecode)
        if jsondecode is None:
            r['data'] = [json.loads(d) for d in r['data']]
        self.assertEqual(r['data'], data)
        r = dict(data=[json.dumps(d) for d in data])
        self.db.insert('arraytest', r)
        if not array_on:
            r['data'] = pg.cast_array(r['data'], jsondecode)
        if jsondecode is None:
            r['data'] = [json.loads(d) for d in r['data']]
        self.assertEqual(r['data'], data)
        r['data'] = None
        self.db.get('arraytest', r)
        # insert empty json values
        r = dict(data=['', None])
        self.db.insert('arraytest', r)
        r = r['data']
        if array_on:
            self.assertIsInstance(r, list)
            self.assertEqual(len(r), 2)
            self.assertIsNone(r[0])
            self.assertIsNone(r[1])
        else:
            self.assertEqual(r, '{NULL,NULL}')

    def testDeepArray(self):
        array_on = pg.get_array()
        self.createTable(
            'arraytest', 'id serial primary key, data text[][][]')
        r = self.db.get_attnames('arraytest')
        self.assertEqual(r['data'], 'text[]')
        data = [[['Hello, World!', '{a,b,c}', 'back\\slash']]]
        r = dict(data=data)
        self.db.insert('arraytest', r)
        if array_on:
            self.assertEqual(r['data'], data)
        else:
            self.assertTrue(r['data'].startswith('{{{"Hello,'))
        r['data'] = None
        self.db.get('arraytest', r)
        if array_on:
            self.assertEqual(r['data'], data)
        else:
            self.assertTrue(r['data'].startswith('{{{"Hello,'))

    def testInsertUpdateGetRecord(self):
        query = self.db.query
        query('create type test_person_type as'
              ' (name varchar, age smallint, married bool,'
              ' weight real, salary money)')
        self.addCleanup(query, 'drop type test_person_type')
        self.createTable('test_person',
                         'id serial primary key, person test_person_type',
                         oids=False, temporary=False)
        attnames = self.db.get_attnames('test_person')
        self.assertEqual(len(attnames), 2)
        self.assertIn('id', attnames)
        self.assertIn('person', attnames)
        person_typ = attnames['person']
        if self.regtypes:
            self.assertEqual(person_typ, 'test_person_type')
        else:
            self.assertEqual(person_typ, 'record')
        if self.regtypes:
            self.assertEqual(person_typ.attnames,
                             dict(name='character varying', age='smallint',
                                  married='boolean', weight='real',
                                  salary='money'))
        else:
            self.assertEqual(person_typ.attnames,
                             dict(name='text', age='int', married='bool',
                                  weight='float', salary='money'))
        decimal = pg.get_decimal()
        if pg.get_bool():
            bool_class = bool
            t, f = True, False
        else:
            bool_class = str
            t, f = 't', 'f'
        person = ('John Doe', 61, t, 99.5, decimal('93456.75'))
        r = self.db.insert('test_person', None, person=person)
        self.assertEqual(r['id'], 1)
        p = r['person']
        self.assertIsInstance(p, tuple)
        self.assertEqual(p, person)
        self.assertEqual(p.name, 'John Doe')
        self.assertIsInstance(p.name, str)
        self.assertIsInstance(p.age, int)
        self.assertIsInstance(p.married, bool_class)
        self.assertIsInstance(p.weight, float)
        self.assertIsInstance(p.salary, decimal)
        person = ('Jane Roe', 59, f, 64.5, decimal('96543.25'))
        r['person'] = person
        self.db.update('test_person', r)
        self.assertEqual(r['id'], 1)
        p = r['person']
        self.assertIsInstance(p, tuple)
        self.assertEqual(p, person)
        self.assertEqual(p.name, 'Jane Roe')
        self.assertIsInstance(p.name, str)
        self.assertIsInstance(p.age, int)
        self.assertIsInstance(p.married, bool_class)
        self.assertIsInstance(p.weight, float)
        self.assertIsInstance(p.salary, decimal)
        r['person'] = None
        self.db.get('test_person', r)
        self.assertEqual(r['id'], 1)
        p = r['person']
        self.assertIsInstance(p, tuple)
        self.assertEqual(p, person)
        self.assertEqual(p.name, 'Jane Roe')
        self.assertIsInstance(p.name, str)
        self.assertIsInstance(p.age, int)
        self.assertIsInstance(p.married, bool_class)
        self.assertIsInstance(p.weight, float)
        self.assertIsInstance(p.salary, decimal)
        person = (None,) * 5
        r = self.db.insert('test_person', None, person=person)
        self.assertEqual(r['id'], 2)
        p = r['person']
        self.assertIsInstance(p, tuple)
        self.assertIsNone(p.name)
        self.assertIsNone(p.age)
        self.assertIsNone(p.married)
        self.assertIsNone(p.weight)
        self.assertIsNone(p.salary)
        r['person'] = None
        self.db.get('test_person', r)
        self.assertEqual(r['id'], 2)
        p = r['person']
        self.assertIsInstance(p, tuple)
        self.assertIsNone(p.name)
        self.assertIsNone(p.age)
        self.assertIsNone(p.married)
        self.assertIsNone(p.weight)
        self.assertIsNone(p.salary)
        r = self.db.insert('test_person', None, person=None)
        self.assertEqual(r['id'], 3)
        self.assertIsNone(r['person'])
        r['person'] = None
        self.db.get('test_person', r)
        self.assertEqual(r['id'], 3)
        self.assertIsNone(r['person'])

    def testRecordInsertBytea(self):
        query = self.db.query
        query('create type test_person_type as'
              ' (name text, picture bytea)')
        self.addCleanup(query, 'drop type test_person_type')
        self.createTable('test_person', 'person test_person_type',
                         temporary=False)
        person_typ = self.db.get_attnames('test_person')['person']
        self.assertEqual(person_typ.attnames,
                         dict(name='text', picture='bytea'))
        person = ('John Doe', b'O\x00ps\xff!')
        r = self.db.insert('test_person', None, person=person)
        p = r['person']
        self.assertIsInstance(p, tuple)
        self.assertEqual(p, person)
        self.assertEqual(p.name, 'John Doe')
        self.assertIsInstance(p.name, str)
        self.assertEqual(p.picture, person[1])
        self.assertIsInstance(p.picture, bytes)

    def testRecordInsertJson(self):
        query = self.db.query
        try:
            query('create type test_person_type as'
                  ' (name text, data json)')
        except pg.ProgrammingError as error:
            if self.db.server_version < 90200:
                self.skipTest('database does not support json')
            self.fail(str(error))
        self.addCleanup(query, 'drop type test_person_type')
        self.createTable('test_person', 'person test_person_type',
                         temporary=False)
        person_typ = self.db.get_attnames('test_person')['person']
        self.assertEqual(person_typ.attnames,
                         dict(name='text', data='json'))
        person = ('John Doe', dict(age=61, married=True, weight=99.5))
        r = self.db.insert('test_person', None, person=person)
        p = r['person']
        self.assertIsInstance(p, tuple)
        if pg.get_jsondecode() is None:
            p = p._replace(data=json.loads(p.data))
        self.assertEqual(p, person)
        self.assertEqual(p.name, 'John Doe')
        self.assertIsInstance(p.name, str)
        self.assertEqual(p.data, person[1])
        self.assertIsInstance(p.data, dict)

    def testRecordLiteral(self):
        query = self.db.query
        query('create type test_person_type as'
              ' (name varchar, age smallint)')
        self.addCleanup(query, 'drop type test_person_type')
        self.createTable('test_person', 'person test_person_type',
                         temporary=False)
        person_typ = self.db.get_attnames('test_person')['person']
        if self.regtypes:
self.assertEqual(person_typ, 'test_person_type') else: self.assertEqual(person_typ, 'record') if self.regtypes: self.assertEqual(person_typ.attnames, dict(name='character varying', age='smallint')) else: self.assertEqual(person_typ.attnames, dict(name='text', age='int')) person = pg.Literal("('John Doe', 61)") r = self.db.insert('test_person', None, person=person) p = r['person'] self.assertIsInstance(p, tuple) self.assertEqual(p.name, 'John Doe') self.assertIsInstance(p.name, str) self.assertEqual(p.age, 61) self.assertIsInstance(p.age, int) def testDate(self): query = self.db.query for datestyle in ('ISO', 'Postgres, MDY', 'Postgres, DMY', 'SQL, MDY', 'SQL, DMY', 'German'): self.db.set_parameter('datestyle', datestyle) d = date(2016, 3, 14) q = "select $1::date" r = query(q, (d,)).getresult()[0][0] self.assertIsInstance(r, date) self.assertEqual(r, d) q = "select '10000-08-01'::date, '0099-01-08 BC'::date" r = query(q).getresult()[0] self.assertIsInstance(r[0], date) self.assertIsInstance(r[1], date) self.assertEqual(r[0], date.max) self.assertEqual(r[1], date.min) q = "select 'infinity'::date, '-infinity'::date" r = query(q).getresult()[0] self.assertIsInstance(r[0], date) self.assertIsInstance(r[1], date) self.assertEqual(r[0], date.max) self.assertEqual(r[1], date.min) def testTime(self): query = self.db.query d = time(15, 9, 26) q = "select $1::time" r = query(q, (d,)).getresult()[0][0] self.assertIsInstance(r, time) self.assertEqual(r, d) d = time(15, 9, 26, 535897) q = "select $1::time" r = query(q, (d,)).getresult()[0][0] self.assertIsInstance(r, time) self.assertEqual(r, d) def testTimetz(self): query = self.db.query timezones = dict(CET=1, EET=2, EST=-5, UTC=0) for timezone in sorted(timezones): tz = '%+03d00' % timezones[timezone] try: tzinfo = datetime.strptime(tz, '%z').tzinfo except ValueError: # Python < 3.2 tzinfo = pg._get_timezone(tz) self.db.set_parameter('timezone', timezone) d = time(15, 9, 26, tzinfo=tzinfo) q = "select $1::timetz" r = query(q, (d,)).getresult()[0][0] self.assertIsInstance(r, time) self.assertEqual(r, d) d = time(15, 9, 26, 535897, tzinfo) q = "select $1::timetz" r = query(q, (d,)).getresult()[0][0] self.assertIsInstance(r, time) self.assertEqual(r, d) def testTimestamp(self): query = self.db.query for datestyle in ('ISO', 'Postgres, MDY', 'Postgres, DMY', 'SQL, MDY', 'SQL, DMY', 'German'): self.db.set_parameter('datestyle', datestyle) d = datetime(2016, 3, 14) q = "select $1::timestamp" r = query(q, (d,)).getresult()[0][0] self.assertIsInstance(r, datetime) self.assertEqual(r, d) d = datetime(2016, 3, 14, 15, 9, 26) q = "select $1::timestamp" r = query(q, (d,)).getresult()[0][0] self.assertIsInstance(r, datetime) self.assertEqual(r, d) d = datetime(2016, 3, 14, 15, 9, 26, 535897) q = "select $1::timestamp" r = query(q, (d,)).getresult()[0][0] self.assertIsInstance(r, datetime) self.assertEqual(r, d) q = ("select '10000-08-01 AD'::timestamp," " '0099-01-08 BC'::timestamp") r = query(q).getresult()[0] self.assertIsInstance(r[0], datetime) self.assertIsInstance(r[1], datetime) self.assertEqual(r[0], datetime.max) self.assertEqual(r[1], datetime.min) q = "select 'infinity'::timestamp, '-infinity'::timestamp" r = query(q).getresult()[0] self.assertIsInstance(r[0], datetime) self.assertIsInstance(r[1], datetime) self.assertEqual(r[0], datetime.max) self.assertEqual(r[1], datetime.min) def testTimestamptz(self): query = self.db.query timezones = dict(CET=1, EET=2, EST=-5, UTC=0) for timezone in sorted(timezones): tz = '%+03d00' % timezones[timezone] try: 
tzinfo = datetime.strptime(tz, '%z').tzinfo except ValueError: # Python < 3.2 tzinfo = pg._get_timezone(tz) self.db.set_parameter('timezone', timezone) for datestyle in ('ISO', 'Postgres, MDY', 'Postgres, DMY', 'SQL, MDY', 'SQL, DMY', 'German'): self.db.set_parameter('datestyle', datestyle) d = datetime(2016, 3, 14, tzinfo=tzinfo) q = "select $1::timestamptz" r = query(q, (d,)).getresult()[0][0] self.assertIsInstance(r, datetime) self.assertEqual(r, d) d = datetime(2016, 3, 14, 15, 9, 26, tzinfo=tzinfo) q = "select $1::timestamptz" r = query(q, (d,)).getresult()[0][0] self.assertIsInstance(r, datetime) self.assertEqual(r, d) d = datetime(2016, 3, 14, 15, 9, 26, 535897, tzinfo) q = "select $1::timestamptz" r = query(q, (d,)).getresult()[0][0] self.assertIsInstance(r, datetime) self.assertEqual(r, d) q = ("select '10000-08-01 AD'::timestamptz," " '0099-01-08 BC'::timestamptz") r = query(q).getresult()[0] self.assertIsInstance(r[0], datetime) self.assertIsInstance(r[1], datetime) self.assertEqual(r[0], datetime.max) self.assertEqual(r[1], datetime.min) q = "select 'infinity'::timestamptz, '-infinity'::timestamptz" r = query(q).getresult()[0] self.assertIsInstance(r[0], datetime) self.assertIsInstance(r[1], datetime) self.assertEqual(r[0], datetime.max) self.assertEqual(r[1], datetime.min) def testInterval(self): query = self.db.query for intervalstyle in ( 'sql_standard', 'postgres', 'postgres_verbose', 'iso_8601'): self.db.set_parameter('intervalstyle', intervalstyle) d = timedelta(3) q = "select $1::interval" r = query(q, (d,)).getresult()[0][0] self.assertIsInstance(r, timedelta) self.assertEqual(r, d) d = timedelta(-30) r = query(q, (d,)).getresult()[0][0] self.assertIsInstance(r, timedelta) self.assertEqual(r, d) d = timedelta(hours=3, minutes=31, seconds=42, microseconds=5678) q = "select $1::interval" r = query(q, (d,)).getresult()[0][0] self.assertIsInstance(r, timedelta) self.assertEqual(r, d) def testDateAndTimeArrays(self): dt = (date(2016, 3, 14), time(15, 9, 26)) q = "select ARRAY[$1::date], ARRAY[$2::time]" r = self.db.query(q, dt).getresult()[0] self.assertIsInstance(r[0], list) self.assertEqual(r[0][0], dt[0]) self.assertIsInstance(r[1], list) self.assertEqual(r[1][0], dt[1]) def testHstore(self): try: self.db.query("select 'k=>v'::hstore") except pg.DatabaseError: try: self.db.query("create extension hstore") except pg.DatabaseError: self.skipTest("hstore extension not enabled") d = {'k': 'v', 'foo': 'bar', 'baz': 'whatever', '1a': 'anything at all', '2=b': 'value = 2', '3>c': 'value > 3', '4"c': 'value " 4', "5'c": "value ' 5", 'hello, world': '"hi!"', 'None': None, 'NULL': 'NULL', 'empty': ''} q = "select $1::hstore" r = self.db.query(q, (pg.Hstore(d),)).getresult()[0][0] self.assertIsInstance(r, dict) self.assertEqual(r, d) def testUuid(self): d = UUID('{12345678-1234-5678-1234-567812345678}') q = 'select $1::uuid' r = self.db.query(q, (d,)).getresult()[0][0] self.assertIsInstance(r, UUID) self.assertEqual(r, d) def testDbTypesInfo(self): dbtypes = self.db.dbtypes self.assertIsInstance(dbtypes, dict) self.assertNotIn('numeric', dbtypes) typ = dbtypes['numeric'] self.assertIn('numeric', dbtypes) self.assertEqual(typ, 'numeric' if self.regtypes else 'num') self.assertEqual(typ.oid, 1700) self.assertEqual(typ.pgtype, 'numeric') self.assertEqual(typ.regtype, 'numeric') self.assertEqual(typ.simple, 'num') self.assertEqual(typ.typtype, 'b') self.assertEqual(typ.category, 'N') self.assertEqual(typ.delim, ',') self.assertEqual(typ.relid, 0) self.assertIs(dbtypes[1700], typ) 
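# Note on the pattern above: the assertNotIn/assertIn pair around the first
# lookup of dbtypes['numeric'] verifies that the dbtypes registry is filled
# lazily, i.e. a type is only cached once it has been looked up. The same
# pattern is repeated below for the composite row type of the pg_type
# system catalog.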
self.assertNotIn('pg_type', dbtypes) typ = dbtypes['pg_type'] self.assertIn('pg_type', dbtypes) self.assertEqual(typ, 'pg_type' if self.regtypes else 'record') self.assertIsInstance(typ.oid, int) self.assertEqual(typ.pgtype, 'pg_type') self.assertEqual(typ.regtype, 'pg_type') self.assertEqual(typ.simple, 'record') self.assertEqual(typ.typtype, 'c') self.assertEqual(typ.category, 'C') self.assertEqual(typ.delim, ',') self.assertNotEqual(typ.relid, 0) attnames = typ.attnames self.assertIsInstance(attnames, dict) self.assertIs(attnames, dbtypes.get_attnames('pg_type')) self.assertIn('typname', attnames) typname = attnames['typname'] self.assertEqual(typname, 'name' if self.regtypes else 'text') self.assertEqual(typname.typtype, 'b') # base self.assertEqual(typname.category, 'S') # string self.assertIn('typlen', attnames) typlen = attnames['typlen'] self.assertEqual(typlen, 'smallint' if self.regtypes else 'int') self.assertEqual(typlen.typtype, 'b') # base self.assertEqual(typlen.category, 'N') # numeric def testDbTypesTypecast(self): dbtypes = self.db.dbtypes self.assertIsInstance(dbtypes, dict) self.assertNotIn('int4', dbtypes) self.assertIs(dbtypes.get_typecast('int4'), int) dbtypes.set_typecast('int4', float) self.assertIs(dbtypes.get_typecast('int4'), float) dbtypes.reset_typecast('int4') self.assertIs(dbtypes.get_typecast('int4'), int) dbtypes.set_typecast('int4', float) self.assertIs(dbtypes.get_typecast('int4'), float) dbtypes.reset_typecast() self.assertIs(dbtypes.get_typecast('int4'), int) self.assertNotIn('circle', dbtypes) self.assertIsNone(dbtypes.get_typecast('circle')) squared_circle = lambda v: 'Squared Circle: %s' % v dbtypes.set_typecast('circle', squared_circle) self.assertIs(dbtypes.get_typecast('circle'), squared_circle) r = self.db.query("select '0,0,1'::circle").getresult()[0][0] self.assertIn('circle', dbtypes) self.assertEqual(r, 'Squared Circle: <(0,0),1>') self.assertEqual(dbtypes.typecast('Impossible', 'circle'), 'Squared Circle: Impossible') dbtypes.reset_typecast('circle') self.assertIsNone(dbtypes.get_typecast('circle')) def testGetSetTypeCast(self): get_typecast = pg.get_typecast set_typecast = pg.set_typecast dbtypes = self.db.dbtypes self.assertIsInstance(dbtypes, dict) self.assertNotIn('int4', dbtypes) self.assertNotIn('real', dbtypes) self.assertNotIn('bool', dbtypes) self.assertIs(get_typecast('int4'), int) self.assertIs(get_typecast('float4'), float) self.assertIs(get_typecast('bool'), pg.cast_bool) cast_circle = get_typecast('circle') self.addCleanup(set_typecast, 'circle', cast_circle) squared_circle = lambda v: 'Squared Circle: %s' % v self.assertNotIn('circle', dbtypes) set_typecast('circle', squared_circle) self.assertNotIn('circle', dbtypes) self.assertIs(get_typecast('circle'), squared_circle) r = self.db.query("select '0,0,1'::circle").getresult()[0][0] self.assertIn('circle', dbtypes) self.assertEqual(r, 'Squared Circle: <(0,0),1>') set_typecast('circle', cast_circle) self.assertIs(get_typecast('circle'), cast_circle) def testNotificationHandler(self): # the notification handler itself is tested separately f = self.db.notification_handler callback = lambda arg_dict: None handler = f('test', callback) self.assertIsInstance(handler, pg.NotificationHandler) self.assertIs(handler.db, self.db) self.assertEqual(handler.event, 'test') self.assertEqual(handler.stop_event, 'stop_test') self.assertIs(handler.callback, callback) self.assertIsInstance(handler.arg_dict, dict) self.assertEqual(handler.arg_dict, {}) self.assertIsNone(handler.timeout) 
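# The checks below, which are repeated for every handler variant created in
# this test, assert that close() detaches the handler from the connection
# (handler.db becomes None) and that reopening the database afterwards does
# not re-attach a closed handler.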
self.assertFalse(handler.listening) handler.close() self.assertIsNone(handler.db) self.db.reopen() self.assertIsNone(handler.db) handler = f('test2', callback, timeout=2) self.assertIsInstance(handler, pg.NotificationHandler) self.assertIs(handler.db, self.db) self.assertEqual(handler.event, 'test2') self.assertEqual(handler.stop_event, 'stop_test2') self.assertIs(handler.callback, callback) self.assertIsInstance(handler.arg_dict, dict) self.assertEqual(handler.arg_dict, {}) self.assertEqual(handler.timeout, 2) self.assertFalse(handler.listening) handler.close() self.assertIsNone(handler.db) self.db.reopen() self.assertIsNone(handler.db) arg_dict = {'testing': 3} handler = f('test3', callback, arg_dict=arg_dict) self.assertIsInstance(handler, pg.NotificationHandler) self.assertIs(handler.db, self.db) self.assertEqual(handler.event, 'test3') self.assertEqual(handler.stop_event, 'stop_test3') self.assertIs(handler.callback, callback) self.assertIs(handler.arg_dict, arg_dict) self.assertEqual(arg_dict['testing'], 3) self.assertIsNone(handler.timeout) self.assertFalse(handler.listening) handler.close() self.assertIsNone(handler.db) self.db.reopen() self.assertIsNone(handler.db) handler = f('test4', callback, stop_event='stop4') self.assertIsInstance(handler, pg.NotificationHandler) self.assertIs(handler.db, self.db) self.assertEqual(handler.event, 'test4') self.assertEqual(handler.stop_event, 'stop4') self.assertIs(handler.callback, callback) self.assertIsInstance(handler.arg_dict, dict) self.assertEqual(handler.arg_dict, {}) self.assertIsNone(handler.timeout) self.assertFalse(handler.listening) handler.close() self.assertIsNone(handler.db) self.db.reopen() self.assertIsNone(handler.db) arg_dict = {'testing': 5} handler = f('test5', callback, arg_dict, 1.5, 'stop5') self.assertIsInstance(handler, pg.NotificationHandler) self.assertIs(handler.db, self.db) self.assertEqual(handler.event, 'test5') self.assertEqual(handler.stop_event, 'stop5') self.assertIs(handler.callback, callback) self.assertIs(handler.arg_dict, arg_dict) self.assertEqual(arg_dict['testing'], 5) self.assertEqual(handler.timeout, 1.5) self.assertFalse(handler.listening) handler.close() self.assertIsNone(handler.db) self.db.reopen() self.assertIsNone(handler.db) class TestDBClassNonStdOpts(TestDBClass): """Test the methods of the DB class with non-standard global options.""" @classmethod def setUpClass(cls): cls.saved_options = {} cls.set_option('decimal', float) not_bool = not pg.get_bool() cls.set_option('bool', not_bool) not_array = not pg.get_array() cls.set_option('array', not_array) not_bytea_escaped = not pg.get_bytea_escaped() cls.set_option('bytea_escaped', not_bytea_escaped) cls.set_option('jsondecode', None) db = DB() cls.regtypes = not db.use_regtypes() db.close() super(TestDBClassNonStdOpts, cls).setUpClass() @classmethod def tearDownClass(cls): super(TestDBClassNonStdOpts, cls).tearDownClass() cls.reset_option('jsondecode') cls.reset_option('bool') cls.reset_option('array') cls.reset_option('bytea_escaped') cls.reset_option('decimal') @classmethod def set_option(cls, option, value): cls.saved_options[option] = getattr(pg, 'get_' + option)() return getattr(pg, 'set_' + option)(value) @classmethod def reset_option(cls, option): return getattr(pg, 'set_' + option)(cls.saved_options[option]) class TestDBClassAdapter(unittest.TestCase): """Test the adapter object associated with the DB class.""" def setUp(self): self.db = DB() self.adapter = self.db.adapter def tearDown(self): try: self.db.close() except 
pg.InternalError: pass def testGuessSimpleType(self): f = self.adapter.guess_simple_type self.assertEqual(f(pg.Bytea(b'test')), 'bytea') self.assertEqual(f('string'), 'text') self.assertEqual(f(b'string'), 'text') self.assertEqual(f(True), 'bool') self.assertEqual(f(3), 'int') self.assertEqual(f(2.75), 'float') self.assertEqual(f(Decimal('4.25')), 'num') self.assertEqual(f(date(2016, 1, 30)), 'date') self.assertEqual(f([1, 2, 3]), 'int[]') self.assertEqual(f([[[123]]]), 'int[]') self.assertEqual(f(['a', 'b', 'c']), 'text[]') self.assertEqual(f([[['abc']]]), 'text[]') self.assertEqual(f([False, True]), 'bool[]') self.assertEqual(f([[[False]]]), 'bool[]') r = f(('string', True, 3, 2.75, [1], [False])) self.assertEqual(r, 'record') self.assertEqual(list(r.attnames.values()), ['text', 'bool', 'int', 'float', 'int[]', 'bool[]']) def testAdaptQueryTypedList(self): format_query = self.adapter.format_query self.assertRaises(TypeError, format_query, '%s,%s', (1, 2), ('int2',)) self.assertRaises(TypeError, format_query, '%s,%s', (1,), ('int2', 'int2')) values = (3, 7.5, 'hello', True) types = ('int4', 'float4', 'text', 'bool') sql, params = format_query("select %s,%s,%s,%s", values, types) self.assertEqual(sql, 'select $1,$2,$3,$4') self.assertEqual(params, [3, 7.5, 'hello', 't']) types = ('bool', 'bool', 'bool', 'bool') sql, params = format_query("select %s,%s,%s,%s", values, types) self.assertEqual(sql, 'select $1,$2,$3,$4') self.assertEqual(params, ['t', 't', 'f', 't']) values = ('2016-01-30', 'current_date') types = ('date', 'date') sql, params = format_query("values(%s,%s)", values, types) self.assertEqual(sql, 'values($1,current_date)') self.assertEqual(params, ['2016-01-30']) values = ([1, 2, 3], ['a', 'b', 'c']) types = ('_int4', '_text') sql, params = format_query("%s::int4[],%s::text[]", values, types) self.assertEqual(sql, '$1::int4[],$2::text[]') self.assertEqual(params, ['{1,2,3}', '{a,b,c}']) types = ('_bool', '_bool') sql, params = format_query("%s::bool[],%s::bool[]", values, types) self.assertEqual(sql, '$1::bool[],$2::bool[]') self.assertEqual(params, ['{t,t,t}', '{f,f,f}']) values = [(3, 7.5, 'hello', True, [123], ['abc'])] t = self.adapter.simple_type typ = t('record') typ._get_attnames = lambda _self: pg.AttrDict([ ('i', t('int')), ('f', t('float')), ('t', t('text')), ('b', t('bool')), ('i3', t('int[]')), ('t3', t('text[]'))]) types = [typ] sql, params = format_query('select %s', values, types) self.assertEqual(sql, 'select $1') self.assertEqual(params, ['(3,7.5,hello,t,{123},{abc})']) def testAdaptQueryTypedDict(self): format_query = self.adapter.format_query self.assertRaises(TypeError, format_query, '%s,%s', dict(i1=1, i2=2), dict(i1='int2')) values = dict(i=3, f=7.5, t='hello', b=True) types = dict(i='int4', f='float4', t='text', b='bool') sql, params = format_query( "select %(i)s,%(f)s,%(t)s,%(b)s", values, types) self.assertEqual(sql, 'select $3,$2,$4,$1') self.assertEqual(params, ['t', 7.5, 3, 'hello']) types = dict(i='bool', f='bool', t='bool', b='bool') sql, params = format_query( "select %(i)s,%(f)s,%(t)s,%(b)s", values, types) self.assertEqual(sql, 'select $3,$2,$4,$1') self.assertEqual(params, ['t', 't', 't', 'f']) values = dict(d1='2016-01-30', d2='current_date') types = dict(d1='date', d2='date') sql, params = format_query("values(%(d1)s,%(d2)s)", values, types) self.assertEqual(sql, 'values($1,current_date)') self.assertEqual(params, ['2016-01-30']) values = dict(i=[1, 2, 3], t=['a', 'b', 'c']) types = dict(i='_int4', t='_text') sql, params = format_query( 
"%(i)s::int4[],%(t)s::text[]", values, types) self.assertEqual(sql, '$1::int4[],$2::text[]') self.assertEqual(params, ['{1,2,3}', '{a,b,c}']) types = dict(i='_bool', t='_bool') sql, params = format_query( "%(i)s::bool[],%(t)s::bool[]", values, types) self.assertEqual(sql, '$1::bool[],$2::bool[]') self.assertEqual(params, ['{t,t,t}', '{f,f,f}']) values = dict(record=(3, 7.5, 'hello', True, [123], ['abc'])) t = self.adapter.simple_type typ = t('record') typ._get_attnames = lambda _self: pg.AttrDict([ ('i', t('int')), ('f', t('float')), ('t', t('text')), ('b', t('bool')), ('i3', t('int[]')), ('t3', t('text[]'))]) types = dict(record=typ) sql, params = format_query('select %(record)s', values, types) self.assertEqual(sql, 'select $1') self.assertEqual(params, ['(3,7.5,hello,t,{123},{abc})']) def testAdaptQueryUntypedList(self): format_query = self.adapter.format_query values = (3, 7.5, 'hello', True) sql, params = format_query("select %s,%s,%s,%s", values) self.assertEqual(sql, 'select $1,$2,$3,$4') self.assertEqual(params, [3, 7.5, 'hello', 't']) values = [date(2016, 1, 30), 'current_date'] sql, params = format_query("values(%s,%s)", values) self.assertEqual(sql, 'values($1,$2)') self.assertEqual(params, values) values = ([1, 2, 3], ['a', 'b', 'c'], [True, False, True]) sql, params = format_query("%s,%s,%s", values) self.assertEqual(sql, "$1,$2,$3") self.assertEqual(params, ['{1,2,3}', '{a,b,c}', '{t,f,t}']) values = ([[1, 2], [3, 4]], [['a', 'b'], ['c', 'd']], [[True, False], [False, True]]) sql, params = format_query("%s,%s,%s", values) self.assertEqual(sql, "$1,$2,$3") self.assertEqual(params, [ '{{1,2},{3,4}}', '{{a,b},{c,d}}', '{{t,f},{f,t}}']) values = [(3, 7.5, 'hello', True, [123], ['abc'])] sql, params = format_query('select %s', values) self.assertEqual(sql, 'select $1') self.assertEqual(params, ['(3,7.5,hello,t,{123},{abc})']) def testAdaptQueryUntypedDict(self): format_query = self.adapter.format_query values = dict(i=3, f=7.5, t='hello', b=True) sql, params = format_query( "select %(i)s,%(f)s,%(t)s,%(b)s", values) self.assertEqual(sql, 'select $3,$2,$4,$1') self.assertEqual(params, ['t', 7.5, 3, 'hello']) values = dict(d1='2016-01-30', d2='current_date') sql, params = format_query("values(%(d1)s,%(d2)s)", values) self.assertEqual(sql, 'values($1,$2)') self.assertEqual(params, [values['d1'], values['d2']]) values = dict(i=[1, 2, 3], t=['a', 'b', 'c'], b=[True, False, True]) sql, params = format_query("%(i)s,%(t)s,%(b)s", values) self.assertEqual(sql, "$2,$3,$1") self.assertEqual(params, ['{t,f,t}', '{1,2,3}', '{a,b,c}']) values = dict(i=[[1, 2], [3, 4]], t=[['a', 'b'], ['c', 'd']], b=[[True, False], [False, True]]) sql, params = format_query("%(i)s,%(t)s,%(b)s", values) self.assertEqual(sql, "$2,$3,$1") self.assertEqual(params, [ '{{t,f},{f,t}}', '{{1,2},{3,4}}', '{{a,b},{c,d}}']) values = dict(record=(3, 7.5, 'hello', True, [123], ['abc'])) sql, params = format_query('select %(record)s', values) self.assertEqual(sql, 'select $1') self.assertEqual(params, ['(3,7.5,hello,t,{123},{abc})']) def testAdaptQueryInlineList(self): format_query = self.adapter.format_query values = (3, 7.5, 'hello', True) sql, params = format_query("select %s,%s,%s,%s", values, inline=True) self.assertEqual(sql, "select 3,7.5,'hello',true") self.assertEqual(params, []) values = [date(2016, 1, 30), 'current_date'] sql, params = format_query("values(%s,%s)", values, inline=True) self.assertEqual(sql, "values('2016-01-30','current_date')") self.assertEqual(params, []) values = ([1, 2, 3], ['a', 'b', 'c'], 
[True, False, True]) sql, params = format_query("%s,%s,%s", values, inline=True) self.assertEqual(sql, "ARRAY[1,2,3],ARRAY['a','b','c'],ARRAY[true,false,true]") self.assertEqual(params, []) values = ([[1, 2], [3, 4]], [['a', 'b'], ['c', 'd']], [[True, False], [False, True]]) sql, params = format_query("%s,%s,%s", values, inline=True) self.assertEqual(sql, "ARRAY[[1,2],[3,4]],ARRAY[['a','b'],['c','d']]," "ARRAY[[true,false],[false,true]]") self.assertEqual(params, []) values = [(3, 7.5, 'hello', True, [123], ['abc'])] sql, params = format_query('select %s', values, inline=True) self.assertEqual(sql, "select (3,7.5,'hello',true,ARRAY[123],ARRAY['abc'])") self.assertEqual(params, []) def testAdaptQueryInlineDict(self): format_query = self.adapter.format_query values = dict(i=3, f=7.5, t='hello', b=True) sql, params = format_query( "select %(i)s,%(f)s,%(t)s,%(b)s", values, inline=True) self.assertEqual(sql, "select 3,7.5,'hello',true") self.assertEqual(params, []) values = dict(d1='2016-01-30', d2='current_date') sql, params = format_query( "values(%(d1)s,%(d2)s)", values, inline=True) self.assertEqual(sql, "values('2016-01-30','current_date')") self.assertEqual(params, []) values = dict(i=[1, 2, 3], t=['a', 'b', 'c'], b=[True, False, True]) sql, params = format_query("%(i)s,%(t)s,%(b)s", values, inline=True) self.assertEqual(sql, "ARRAY[1,2,3],ARRAY['a','b','c'],ARRAY[true,false,true]") self.assertEqual(params, []) values = dict(i=[[1, 2], [3, 4]], t=[['a', 'b'], ['c', 'd']], b=[[True, False], [False, True]]) sql, params = format_query("%(i)s,%(t)s,%(b)s", values, inline=True) self.assertEqual(sql, "ARRAY[[1,2],[3,4]],ARRAY[['a','b'],['c','d']]," "ARRAY[[true,false],[false,true]]") self.assertEqual(params, []) values = dict(record=(3, 7.5, 'hello', True, [123], ['abc'])) sql, params = format_query('select %(record)s', values, inline=True) self.assertEqual(sql, "select (3,7.5,'hello',true,ARRAY[123],ARRAY['abc'])") self.assertEqual(params, []) def testAdaptQueryWithPgRepr(self): format_query = self.adapter.format_query self.assertRaises(TypeError, format_query, '%s', object(), inline=True) class TestObject: def __pg_repr__(self): return "'adapted'" sql, params = format_query('select %s', [TestObject()], inline=True) self.assertEqual(sql, "select 'adapted'") self.assertEqual(params, []) sql, params = format_query('select %s', [[TestObject()]], inline=True) self.assertEqual(sql, "select ARRAY['adapted']") self.assertEqual(params, []) class TestSchemas(unittest.TestCase): """Test correct handling of schemas (namespaces).""" cls_set_up = False @classmethod def setUpClass(cls): db = DB() cls.with_oids = "with oids" if db.server_version < 120000 else "" query = db.query for num_schema in range(5): if num_schema: schema = "s%d" % num_schema query("drop schema if exists %s cascade" % (schema,)) try: query("create schema %s" % (schema,)) except pg.ProgrammingError: raise RuntimeError("The test user cannot create schemas.\n" "Grant create on database %s to the user" " for running these tests." 
% dbname) else: schema = "public" query("drop table if exists %s.t" % (schema,)) query("drop table if exists %s.t%d" % (schema, num_schema)) query("create table %s.t %s as select 1 as n, %d as d" % (schema, cls.with_oids, num_schema)) query("create table %s.t%d %s as select 1 as n, %d as d" % (schema, num_schema, cls.with_oids, num_schema)) db.close() cls.cls_set_up = True @classmethod def tearDownClass(cls): db = DB() query = db.query for num_schema in range(5): if num_schema: schema = "s%d" % num_schema query("drop schema %s cascade" % (schema,)) else: schema = "public" query("drop table %s.t" % (schema,)) query("drop table %s.t%d" % (schema, num_schema)) db.close() def setUp(self): self.assertTrue(self.cls_set_up) self.db = DB() def tearDown(self): self.doCleanups() self.db.close() def testGetTables(self): tables = self.db.get_tables() for num_schema in range(5): if num_schema: schema = "s" + str(num_schema) else: schema = "public" for t in (schema + ".t", schema + ".t" + str(num_schema)): self.assertIn(t, tables) def testGetAttnames(self): get_attnames = self.db.get_attnames query = self.db.query result = {'d': 'int', 'n': 'int'} if self.with_oids: result['oid'] = 'int' r = get_attnames("t") self.assertEqual(r, result) r = get_attnames("s4.t4") self.assertEqual(r, result) query("drop table if exists s3.t3m") self.addCleanup(query, "drop table s3.t3m") query("create table s3.t3m %s as select 1 as m" % (self.with_oids,)) result_m = {'m': 'int'} if self.with_oids: result_m['oid'] = 'int' r = get_attnames("s3.t3m") self.assertEqual(r, result_m) query("set search_path to s1,s3") r = get_attnames("t3") self.assertEqual(r, result) r = get_attnames("t3m") self.assertEqual(r, result_m) def testGet(self): get = self.db.get query = self.db.query PrgError = pg.ProgrammingError self.assertEqual(get("t", 1, 'n')['d'], 0) self.assertEqual(get("t0", 1, 'n')['d'], 0) self.assertEqual(get("public.t", 1, 'n')['d'], 0) self.assertEqual(get("public.t0", 1, 'n')['d'], 0) self.assertRaises(PrgError, get, "public.t1", 1, 'n') self.assertEqual(get("s1.t1", 1, 'n')['d'], 1) self.assertEqual(get("s3.t", 1, 'n')['d'], 3) query("set search_path to s2,s4") self.assertRaises(PrgError, get, "t1", 1, 'n') self.assertEqual(get("t4", 1, 'n')['d'], 4) self.assertRaises(PrgError, get, "t3", 1, 'n') self.assertEqual(get("t", 1, 'n')['d'], 2) self.assertEqual(get("s3.t3", 1, 'n')['d'], 3) query("set search_path to s1,s3") self.assertRaises(PrgError, get, "t2", 1, 'n') self.assertEqual(get("t3", 1, 'n')['d'], 3) self.assertRaises(PrgError, get, "t4", 1, 'n') self.assertEqual(get("t", 1, 'n')['d'], 1) self.assertEqual(get("s4.t4", 1, 'n')['d'], 4) def testMunging(self): get = self.db.get query = self.db.query r = get("t", 1, 'n') if self.with_oids: self.assertIn('oid(t)', r) else: self.assertNotIn('oid(t)', r) query("set search_path to s2") r = get("t2", 1, 'n') if self.with_oids: self.assertIn('oid(t2)', r) else: self.assertNotIn('oid(t2)', r) query("set search_path to s3") r = get("t", 1, 'n') if self.with_oids: self.assertIn('oid(t)', r) else: self.assertNotIn('oid(t)', r) class TestDebug(unittest.TestCase): """Test the debug attribute of the DB class.""" def setUp(self): self.db = DB() self.query = self.db.query self.debug = self.db.debug self.output = StringIO() self.stdout, sys.stdout = sys.stdout, self.output def tearDown(self): sys.stdout = self.stdout self.output.close() self.db.debug = debug self.db.close() def get_output(self): return self.output.getvalue() def send_queries(self): self.db.query("select 1") 
self.db.query("select 2") def testDebugDefault(self): if debug: self.assertEqual(self.db.debug, debug) else: self.assertIsNone(self.db.debug) def testDebugIsFalse(self): self.db.debug = False self.send_queries() self.assertEqual(self.get_output(), "") def testDebugIsTrue(self): self.db.debug = True self.send_queries() self.assertEqual(self.get_output(), "select 1\nselect 2\n") def testDebugIsString(self): self.db.debug = "Test with string: %s." self.send_queries() self.assertEqual(self.get_output(), "Test with string: select 1.\nTest with string: select 2.\n") def testDebugIsFileLike(self): with tempfile.TemporaryFile('w+') as debug_file: self.db.debug = debug_file self.send_queries() debug_file.seek(0) output = debug_file.read() self.assertEqual(output, "select 1\nselect 2\n") self.assertEqual(self.get_output(), "") def testDebugIsCallable(self): output = [] self.db.debug = output.append self.db.query("select 1") self.db.query("select 2") self.assertEqual(output, ["select 1", "select 2"]) self.assertEqual(self.get_output(), "") def testDebugMultipleArgs(self): output = [] self.db.debug = output.append args = ['Error', 42, {1: 'a', 2: 'b'}, [3, 5, 7]] self.db._do_debug(*args) self.assertEqual(output, ['\n'.join(str(arg) for arg in args)]) self.assertEqual(self.get_output(), "") class TestMemoryLeaks(unittest.TestCase): """Test that the DB class does not leak memory.""" def getLeaks(self, fut): ids = set() objs = [] add_ids = ids.update gc.collect() objs[:] = gc.get_objects() add_ids(id(obj) for obj in objs) fut() gc.collect() objs[:] = gc.get_objects() objs[:] = [obj for obj in objs if id(obj) not in ids] if objs and sys.version_info[:3] in ((3, 5, 0), (3, 5, 1)): # workaround for Python issue 26811 objs[:] = [obj for obj in objs if repr(obj) != '(<NULL>,)'] self.assertEqual(len(objs), 0) def testLeaksWithClose(self): def fut(): db = DB() db.query("select $1::int as r", 42).dictresult() db.close() self.getLeaks(fut) def testLeaksWithoutClose(self): def fut(): db = DB() db.query("select $1::int as r", 42).dictresult() self.getLeaks(fut) if __name__ == '__main__': unittest.main() pygresql-5.1.2/tests/test_classic_functions.py000077500000000000000000001201671365010227600216630ustar00rootroot00000000000000#!/usr/bin/python # -*- coding: utf-8 -*- """Test the classic PyGreSQL interface. Sub-tests for the module functions and constants. Contributed by Christoph Zwerschke. These tests do not need a database to test against.
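They cover the default connection settings, the array, record and hstore parsers, the interval typecast, the escaping functions, and the configuration getters and setters.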
""" try: import unittest2 as unittest # for Python < 2.7 except ImportError: import unittest import json import re import pg # the module under test from datetime import timedelta try: # noinspection PyUnresolvedReferences long except NameError: # Python >= 3.0 long = int try: # noinspection PyUnresolvedReferences unicode except NameError: # Python >= 3.0 unicode = str class TestHasConnect(unittest.TestCase): """Test existence of basic pg module functions.""" def testhasPgError(self): self.assertTrue(issubclass(pg.Error, Exception)) def testhasPgWarning(self): self.assertTrue(issubclass(pg.Warning, Exception)) def testhasPgInterfaceError(self): self.assertTrue(issubclass(pg.InterfaceError, pg.Error)) def testhasPgDatabaseError(self): self.assertTrue(issubclass(pg.DatabaseError, pg.Error)) def testhasPgInternalError(self): self.assertTrue(issubclass(pg.InternalError, pg.DatabaseError)) def testhasPgOperationalError(self): self.assertTrue(issubclass(pg.OperationalError, pg.DatabaseError)) def testhasPgProgrammingError(self): self.assertTrue(issubclass(pg.ProgrammingError, pg.DatabaseError)) def testhasPgIntegrityError(self): self.assertTrue(issubclass(pg.IntegrityError, pg.DatabaseError)) def testhasPgDataError(self): self.assertTrue(issubclass(pg.DataError, pg.DatabaseError)) def testhasPgNotSupportedError(self): self.assertTrue(issubclass(pg.NotSupportedError, pg.DatabaseError)) def testhasPgInvalidResultError(self): self.assertTrue(issubclass(pg.InvalidResultError, pg.DataError)) def testhasPgNoResultError(self): self.assertTrue(issubclass(pg.NoResultError, pg.InvalidResultError)) def testhasPgMultipleResultsError(self): self.assertTrue( issubclass(pg.MultipleResultsError, pg.InvalidResultError)) def testhasConnect(self): self.assertTrue(callable(pg.connect)) def testhasEscapeString(self): self.assertTrue(callable(pg.escape_string)) def testhasEscapeBytea(self): self.assertTrue(callable(pg.escape_bytea)) def testhasUnescapeBytea(self): self.assertTrue(callable(pg.unescape_bytea)) def testDefHost(self): d0 = pg.get_defhost() d1 = 'pgtesthost' pg.set_defhost(d1) self.assertEqual(pg.get_defhost(), d1) pg.set_defhost(d0) self.assertEqual(pg.get_defhost(), d0) def testDefPort(self): d0 = pg.get_defport() d1 = 1234 pg.set_defport(d1) self.assertEqual(pg.get_defport(), d1) if d0 is None: d0 = -1 pg.set_defport(d0) if d0 == -1: d0 = None self.assertEqual(pg.get_defport(), d0) def testDefOpt(self): d0 = pg.get_defopt() d1 = '-h pgtesthost -p 1234' pg.set_defopt(d1) self.assertEqual(pg.get_defopt(), d1) pg.set_defopt(d0) self.assertEqual(pg.get_defopt(), d0) def testDefBase(self): d0 = pg.get_defbase() d1 = 'pgtestdb' pg.set_defbase(d1) self.assertEqual(pg.get_defbase(), d1) pg.set_defbase(d0) self.assertEqual(pg.get_defbase(), d0) class TestParseArray(unittest.TestCase): """Test the array parser.""" test_strings = [ ('', str, ValueError), ('{}', None, []), ('{}', str, []), (' { } ', None, []), ('{', str, ValueError), ('{{}', str, ValueError), ('{}{', str, ValueError), ('[]', str, ValueError), ('()', str, ValueError), ('{[]}', str, ['[]']), ('{hello}', int, ValueError), ('{42}', int, [42]), ('{ 42 }', int, [42]), ('{42', int, ValueError), ('{ 42 ', int, ValueError), ('{hello}', str, ['hello']), ('{ hello }', str, ['hello']), ('{hi} ', str, ['hi']), ('{hi} ?', str, ValueError), ('{null}', str, [None]), (' { NULL } ', str, [None]), (' { NULL } ', str, [None]), (' { not null } ', str, ['not null']), (' { not NULL } ', str, ['not NULL']), (' {"null"} ', str, ['null']), (' {"NULL"} ', str, ['NULL']), 
('{Hi!}', str, ['Hi!']), ('{"Hi!"}', str, ['Hi!']), ('{" Hi! "}', str, [' Hi! ']), ('{a"}', str, ValueError), ('{"b}', str, ValueError), ('{a"b}', str, ValueError), (r'{a\"b}', str, ['a"b']), (r'{a\,b}', str, ['a,b']), (r'{a\bc}', str, ['abc']), (r'{"a\bc"}', str, ['abc']), (r'{\a\b\c}', str, ['abc']), (r'{"\a\b\c"}', str, ['abc']), (r'{"a"b"}', str, ValueError), (r'{"a""b"}', str, ValueError), (r'{"a\"b"}', str, ['a"b']), ('{"{}"}', str, ['{}']), (r'{\{\}}', str, ['{}']), ('{"{a,b,c}"}', str, ['{a,b,c}']), ("{'abc'}", str, ["'abc'"]), ('{"abc"}', str, ['abc']), (r'{\"abc\"}', str, ['"abc"']), (r"{\'abc\'}", str, ["'abc'"]), (r"{abc,d,efg}", str, ['abc', 'd', 'efg']), ('{Hello World!}', str, ['Hello World!']), ('{Hello, World!}', str, ['Hello', 'World!']), (r'{Hello,\ World!}', str, ['Hello', ' World!']), (r'{Hello\, World!}', str, ['Hello, World!']), ('{"Hello World!"}', str, ['Hello World!']), ('{this, should, be, null}', str, ['this', 'should', 'be', None]), ('{This, should, be, NULL}', str, ['This', 'should', 'be', None]), ('{3, 2, 1, null}', int, [3, 2, 1, None]), ('{3, 2, 1, NULL}', int, [3, 2, 1, None]), ('{3,17,51}', int, [3, 17, 51]), (' { 3 , 17 , 51 } ', int, [3, 17, 51]), ('{3,17,51}', str, ['3', '17', '51']), (' { 3 , 17 , 51 } ', str, ['3', '17', '51']), ('{1,"2",abc,"def"}', str, ['1', '2', 'abc', 'def']), ('{{}}', int, [[]]), ('{{},{}}', int, [[], []]), ('{ {} , {} , {} }', int, [[], [], []]), ('{ {} , {} , {} , }', int, ValueError), ('{{{1,2,3},{4,5,6}}}', int, [[[1, 2, 3], [4, 5, 6]]]), ('{{1,2,3},{4,5,6},{7,8,9}}', int, [[1, 2, 3], [4, 5, 6], [7, 8, 9]]), ('{20000, 25000, 25000, 25000}', int, [20000, 25000, 25000, 25000]), ('{{{17,18,19},{14,15,16},{11,12,13}},' '{{27,28,29},{24,25,26},{21,22,23}},' '{{37,38,39},{34,35,36},{31,32,33}}}', int, [[[17, 18, 19], [14, 15, 16], [11, 12, 13]], [[27, 28, 29], [24, 25, 26], [21, 22, 23]], [[37, 38, 39], [34, 35, 36], [31, 32, 33]]]), ('{{"breakfast", "consulting"}, {"meeting", "lunch"}}', str, [['breakfast', 'consulting'], ['meeting', 'lunch']]), ('[1:3]={1,2,3}', int, [1, 2, 3]), ('[-1:1]={1,2,3}', int, [1, 2, 3]), ('[-1:+1]={1,2,3}', int, [1, 2, 3]), ('[-3:-1]={1,2,3}', int, [1, 2, 3]), ('[+1:+3]={1,2,3}', int, [1, 2, 3]), ('[0:2]={1,2,3}', int, [1, 2, 3]), ('[7:9]={1,2,3}', int, [1, 2, 3]), ('[]={1,2,3}', int, ValueError), ('[1:]={1,2,3}', int, ValueError), ('[:3]={1,2,3}', int, ValueError), ('[1:1][-2:-1][3:5]={{{1,2,3},{4,5,6}}}', int, [[[1, 2, 3], [4, 5, 6]]]), (' [1:1] [-2:-1] [3:5] = { { { 1 , 2 , 3 }, {4 , 5 , 6 } } }', int, [[[1, 2, 3], [4, 5, 6]]]), ('[1:1][3:5]={{1,2,3},{4,5,6}}', int, [[1, 2, 3], [4, 5, 6]]), ('[3:5]={{1,2,3},{4,5,6}}', int, ValueError), ('[1:1][-2:-1][3:5]={{1,2,3},{4,5,6}}', int, ValueError)] def testParserParams(self): f = pg.cast_array self.assertRaises(TypeError, f) self.assertRaises(TypeError, f, None) self.assertRaises(TypeError, f, '{}', 1) self.assertRaises(TypeError, f, '{}', b',',) self.assertRaises(TypeError, f, '{}', None, None) self.assertRaises(TypeError, f, '{}', None, 1) self.assertRaises(TypeError, f, '{}', None, b'') self.assertRaises(ValueError, f, '{}', None, b'\\') self.assertRaises(ValueError, f, '{}', None, b'{') self.assertRaises(ValueError, f, '{}', None, b'}') self.assertRaises(TypeError, f, '{}', None, b',;') self.assertEqual(f('{}'), []) self.assertEqual(f('{}', None), []) self.assertEqual(f('{}', None, b';'), []) self.assertEqual(f('{}', str), []) self.assertEqual(f('{}', str, b';'), []) def testParserSimple(self): r = pg.cast_array('{a,b,c}') self.assertIsInstance(r, 
list) self.assertEqual(len(r), 3) self.assertEqual(r, ['a', 'b', 'c']) def testParserNested(self): f = pg.cast_array r = f('{{a,b,c}}') self.assertIsInstance(r, list) self.assertEqual(len(r), 1) r = r[0] self.assertIsInstance(r, list) self.assertEqual(len(r), 3) self.assertEqual(r, ['a', 'b', 'c']) self.assertRaises(ValueError, f, '{a,{b,c}}') r = f('{{a,b},{c,d}}') self.assertIsInstance(r, list) self.assertEqual(len(r), 2) r = r[1] self.assertIsInstance(r, list) self.assertEqual(len(r), 2) self.assertEqual(r, ['c', 'd']) r = f('{{a},{b},{c}}') self.assertIsInstance(r, list) self.assertEqual(len(r), 3) r = r[1] self.assertIsInstance(r, list) self.assertEqual(len(r), 1) self.assertEqual(r[0], 'b') r = f('{{{{{{{abc}}}}}}}') for i in range(7): self.assertIsInstance(r, list) self.assertEqual(len(r), 1) r = r[0] self.assertEqual(r, 'abc') def testParserTooDeeplyNested(self): f = pg.cast_array for n in 3, 5, 9, 12, 16, 32, 64, 256: r = '%sa,b,c%s' % ('{' * n, '}' * n) if n > 16: # hard coded maximum depth self.assertRaises(ValueError, f, r) else: r = f(r) for i in range(n - 1): self.assertIsInstance(r, list) self.assertEqual(len(r), 1) r = r[0] self.assertEqual(len(r), 3) self.assertEqual(r, ['a', 'b', 'c']) def testParserCast(self): f = pg.cast_array self.assertEqual(f('{1}'), ['1']) self.assertEqual(f('{1}', None), ['1']) self.assertEqual(f('{1}', int), [1]) self.assertEqual(f('{1}', str), ['1']) self.assertEqual(f('{a}'), ['a']) self.assertEqual(f('{a}', None), ['a']) self.assertRaises(ValueError, f, '{a}', int) self.assertEqual(f('{a}', str), ['a']) cast = lambda s: '%s is ok' % s self.assertEqual(f('{a}', cast), ['a is ok']) def testParserDelim(self): f = pg.cast_array self.assertEqual(f('{1,2}'), ['1', '2']) self.assertEqual(f('{1,2}', delim=b','), ['1', '2']) self.assertEqual(f('{1;2}'), ['1;2']) self.assertEqual(f('{1;2}', delim=b';'), ['1', '2']) self.assertEqual(f('{1,2}', delim=b';'), ['1,2']) def testParserWithData(self): f = pg.cast_array for string, cast, expected in self.test_strings: if expected is ValueError: self.assertRaises(ValueError, f, string, cast) else: self.assertEqual(f(string, cast), expected) def testParserWithoutCast(self): f = pg.cast_array for string, cast, expected in self.test_strings: if cast is not str: continue if expected is ValueError: self.assertRaises(ValueError, f, string) else: self.assertEqual(f(string), expected) def testParserWithDifferentDelimiter(self): f = pg.cast_array def replace_comma(value): if isinstance(value, str): return value.replace(',', ';') elif isinstance(value, list): return [replace_comma(v) for v in value] else: return value for string, cast, expected in self.test_strings: string = replace_comma(string) if expected is ValueError: self.assertRaises(ValueError, f, string, cast) else: expected = replace_comma(expected) self.assertEqual(f(string, cast, b';'), expected) class TestParseRecord(unittest.TestCase): """Test the record parser.""" test_strings = [ ('', None, ValueError), ('', str, ValueError), ('(', None, ValueError), ('(', str, ValueError), ('()', None, (None,)), ('()', str, (None,)), ('()', int, (None,)), ('(,)', str, (None, None)), ('( , )', str, (' ', ' ')), ('(")', None, ValueError), ('("")', None, ('',)), ('("")', str, ('',)), ('("")', int, ValueError), ('("" )', None, (' ',)), ('("" )', str, (' ',)), ('("" )', int, ValueError), (' () ', None, (None,)), (' ( ) ', None, (' ',)), ('(', str, ValueError), ('(()', str, ('(',)), ('(())', str, ValueError), ('()(', str, ValueError), ('()()', str, ValueError), ('[]', str, 
ValueError), ('{}', str, ValueError), ('([])', str, ('[]',)), ('(hello)', int, ValueError), ('(42)', int, (42,)), ('( 42 )', int, (42,)), ('( 42)', int, (42,)), ('(42)', str, ('42',)), ('( 42 )', str, (' 42 ',)), ('( 42)', str, (' 42',)), ('(42', int, ValueError), ('( 42 ', int, ValueError), ('(hello)', str, ('hello',)), ('( hello )', str, (' hello ',)), ('(hello))', str, ValueError), (' (hello) ', str, ('hello',)), (' (hello) )', str, ValueError), ('(hello)?', str, ValueError), ('(null)', str, ('null',)), ('(null)', int, ValueError), (' ( NULL ) ', str, (' NULL ',)), (' ( NULL ) ', str, (' NULL ',)), (' ( null null ) ', str, (' null null ',)), (' ("null") ', str, ('null',)), (' ("NULL") ', str, ('NULL',)), ('(Hi!)', str, ('Hi!',)), ('("Hi!")', str, ('Hi!',)), ("('Hi!')", str, ("'Hi!'",)), ('(" Hi! ")', str, (' Hi! ',)), ('("Hi!" )', str, ('Hi! ',)), ('( "Hi!")', str, (' Hi!',)), ('( "Hi!" )', str, (' Hi! ',)), ('( ""Hi!"" )', str, (' Hi! ',)), ('( """Hi!""" )', str, (' "Hi!" ',)), ('(a")', str, ValueError), ('("b)', str, ValueError), ('("a" "b)', str, ValueError), ('("a" "b")', str, ('a b',)), ('( "a" "b" "c" )', str, (' a b c ',)), ('( "a" "b" "c" )', str, (' a b c ',)), ('( "a,b" "c,d" )', str, (' a,b c,d ',)), ('( "(a,b,c)" d, e, "f,g")', str, (' (a,b,c) d', ' e', ' f,g')), ('(a",b,c",d,"e,f")', str, ('a,b,c', 'd', 'e,f')), ('( """a,b""", ""c,d"", "e,f", "g", ""h"", """i""")', str, (' "a,b"', ' c', 'd', ' e,f', ' g', ' h', ' "i"')), ('(a",b)",c"),(d,e)",f,g)', str, ('a,b)', 'c),(d,e)', 'f', 'g')), ('(a"b)', str, ValueError), (r'(a\"b)', str, ('a"b',)), ('(a""b)', str, ('ab',)), ('("a""b")', str, ('a"b',)), (r'(a\,b)', str, ('a,b',)), (r'(a\bc)', str, ('abc',)), (r'("a\bc")', str, ('abc',)), (r'(\a\b\c)', str, ('abc',)), (r'("\a\b\c")', str, ('abc',)), ('("()")', str, ('()',)), (r'(\,)', str, (',',)), (r'(\(\))', str, ('()',)), (r'(\)\()', str, (')(',)), ('("(a,b,c)")', str, ('(a,b,c)',)), ("('abc')", str, ("'abc'",)), ('("abc")', str, ('abc',)), (r'(\"abc\")', str, ('"abc"',)), (r"(\'abc\')", str, ("'abc'",)), ('(Hello World!)', str, ('Hello World!',)), ('(Hello, World!)', str, ('Hello', ' World!',)), (r'(Hello,\ World!)', str, ('Hello', ' World!',)), (r'(Hello\, World!)', str, ('Hello, World!',)), ('("Hello World!")', str, ('Hello World!',)), ("(this,shouldn't,be,null)", str, ('this', "shouldn't", 'be', 'null')), ('(null,should,be,)', str, ('null', 'should', 'be', None)), ('(abcABC0123!?+-*/=&%$\\\\\'\\"{[]}"""":;\\,,)', str, ('abcABC0123!?+-*/=&%$\\\'"{[]}":;,', None)), ('(3, 2, 1,)', int, (3, 2, 1, None)), ('(3, 2, 1, )', int, ValueError), ('(, 1, 2, 3)', int, (None, 1, 2, 3)), ('( , 1, 2, 3)', int, ValueError), ('(,1,,2,,3,)', int, (None, 1, None, 2, None, 3, None)), ('(3,17,51)', int, (3, 17, 51)), (' ( 3 , 17 , 51 ) ', int, (3, 17, 51)), ('(3,17,51)', str, ('3', '17', '51')), (' ( 3 , 17 , 51 ) ', str, (' 3 ', ' 17 ', ' 51 ')), ('(1,"2",abc,"def")', str, ('1', '2', 'abc', 'def')), ('(())', str, ValueError), ('()))', str, ValueError), ('()()', str, ValueError), ('((()', str, ('((',)), ('(())', int, ValueError), ('((),())', str, ValueError), ('("()","()")', str, ('()', '()')), ('( " () , () , () " )', str, (' () , () , () ',)), ('(20000, 25000, 25000, 25000)', int, (20000, 25000, 25000, 25000)), ('("breakfast","consulting","meeting","lunch")', str, ('breakfast', 'consulting', 'meeting', 'lunch')), ('("breakfast","consulting","meeting","lunch")', (str, str, str), ValueError), ('("breakfast","consulting","meeting","lunch")', (str, str, str, str), ('breakfast', 'consulting', 
'meeting', 'lunch')), ('("breakfast","consulting","meeting","lunch")', (str, str, str, str, str), ValueError), ('("fuzzy dice",42,1.9375)', None, ('fuzzy dice', '42', '1.9375')), ('("fuzzy dice",42,1.9375)', str, ('fuzzy dice', '42', '1.9375')), ('("fuzzy dice",42,1.9375)', int, ValueError), ('("fuzzy dice",42,1.9375)', (str, int, float), ('fuzzy dice', 42, 1.9375)), ('("fuzzy dice",42,1.9375)', (str, int), ValueError), ('("fuzzy dice",42,1.9375)', (str, int, float, str), ValueError), ('("fuzzy dice",42,)', (str, int, float), ('fuzzy dice', 42, None)), ('("fuzzy dice",42,)', (str, int), ValueError), ('("",42,)', (str, int, float), ('', 42, None)), ('("fuzzy dice","",1.9375)', (str, int, float), ValueError), ('(fuzzy dice,"42","1.9375")', (str, int, float), ('fuzzy dice', 42, 1.9375))] def testParserParams(self): f = pg.cast_record self.assertRaises(TypeError, f) self.assertRaises(TypeError, f, None) self.assertRaises(TypeError, f, '()', 1) self.assertRaises(TypeError, f, '()', b',',) self.assertRaises(TypeError, f, '()', None, None) self.assertRaises(TypeError, f, '()', None, 1) self.assertRaises(TypeError, f, '()', None, b'') self.assertRaises(ValueError, f, '()', None, b'\\') self.assertRaises(ValueError, f, '()', None, b'(') self.assertRaises(ValueError, f, '()', None, b')') self.assertRaises(TypeError, f, '{}', None, b',;') self.assertEqual(f('()'), (None,)) self.assertEqual(f('()', None), (None,)) self.assertEqual(f('()', None, b';'), (None,)) self.assertEqual(f('()', str), (None,)) self.assertEqual(f('()', str, b';'), (None,)) def testParserSimple(self): r = pg.cast_record('(a,b,c)') self.assertIsInstance(r, tuple) self.assertEqual(len(r), 3) self.assertEqual(r, ('a', 'b', 'c')) def testParserNested(self): f = pg.cast_record self.assertRaises(ValueError, f, '((a,b,c))') self.assertRaises(ValueError, f, '((a,b),(c,d))') self.assertRaises(ValueError, f, '((a),(b),(c))') self.assertRaises(ValueError, f, '(((((((abc)))))))') def testParserManyElements(self): f = pg.cast_record for n in 3, 5, 9, 12, 16, 32, 64, 256: r = '(%s)' % ','.join(map(str, range(n))) r = f(r, int) self.assertEqual(r, tuple(range(n))) def testParserCastUniform(self): f = pg.cast_record self.assertEqual(f('(1)'), ('1',)) self.assertEqual(f('(1)', None), ('1',)) self.assertEqual(f('(1)', int), (1,)) self.assertEqual(f('(1)', str), ('1',)) self.assertEqual(f('(a)'), ('a',)) self.assertEqual(f('(a)', None), ('a',)) self.assertRaises(ValueError, f, '(a)', int) self.assertEqual(f('(a)', str), ('a',)) cast = lambda s: '%s is ok' % s self.assertEqual(f('(a)', cast), ('a is ok',)) def testParserCastNonUniform(self): f = pg.cast_record self.assertEqual(f('(1)', []), ('1',)) self.assertEqual(f('(1)', [None]), ('1',)) self.assertEqual(f('(1)', [str]), ('1',)) self.assertEqual(f('(1)', [int]), (1,)) self.assertRaises(ValueError, f, '(1)', [None, None]) self.assertRaises(ValueError, f, '(1)', [str, str]) self.assertRaises(ValueError, f, '(1)', [int, int]) self.assertEqual(f('(a)', [None]), ('a',)) self.assertEqual(f('(a)', [str]), ('a',)) self.assertRaises(ValueError, f, '(a)', [int]) self.assertEqual(f('(1,a)', [int, str]), (1, 'a')) self.assertRaises(ValueError, f, '(1,a)', [str, int]) self.assertEqual(f('(a,1)', [str, int]), ('a', 1)) self.assertRaises(ValueError, f, '(a,1)', [int, str]) self.assertEqual(f('(1,a,2,b,3,c)', [int, str, int, str, int, str]), (1, 'a', 2, 'b', 3, 'c')) self.assertEqual(f('(1,a,2,b,3,c)', (int, str, int, str, int, str)), (1, 'a', 2, 'b', 3, 'c')) cast1 = lambda s: '%s is ok' % s 
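# A custom cast is a plain callable receiving the raw string of one field;
# when a sequence of casts is given, it is applied positionally and its
# length must match the number of fields (see the ValueError checks below).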
self.assertEqual(f('(a)', [cast1]), ('a is ok',)) cast2 = lambda s: 'and %s is ok, too' % s self.assertEqual(f('(a,b)', [cast1, cast2]), ('a is ok', 'and b is ok, too')) self.assertRaises(ValueError, f, '(a)', [cast1, cast2]) self.assertRaises(ValueError, f, '(a,b,c)', [cast1, cast2]) self.assertEqual(f('(1,2,3,4,5,6)', [int, float, str, None, cast1, cast2]), (1, 2.0, '3', '4', '5 is ok', 'and 6 is ok, too')) def testParserDelim(self): f = pg.cast_record self.assertEqual(f('(1,2)'), ('1', '2')) self.assertEqual(f('(1,2)', delim=b','), ('1', '2')) self.assertEqual(f('(1;2)'), ('1;2',)) self.assertEqual(f('(1;2)', delim=b';'), ('1', '2')) self.assertEqual(f('(1,2)', delim=b';'), ('1,2',)) def testParserWithData(self): f = pg.cast_record for string, cast, expected in self.test_strings: if expected is ValueError: self.assertRaises(ValueError, f, string, cast) else: self.assertEqual(f(string, cast), expected) def testParserWithoutCast(self): f = pg.cast_record for string, cast, expected in self.test_strings: if cast is not str: continue if expected is ValueError: self.assertRaises(ValueError, f, string) else: self.assertEqual(f(string), expected) def testParserWithDifferentDelimiter(self): f = pg.cast_record def replace_comma(value): if isinstance(value, str): return value.replace(';', '@').replace( ',', ';').replace('@', ',') elif isinstance(value, tuple): return tuple(replace_comma(v) for v in value) else: return value for string, cast, expected in self.test_strings: string = replace_comma(string) if expected is ValueError: self.assertRaises(ValueError, f, string, cast) else: expected = replace_comma(expected) self.assertEqual(f(string, cast, b';'), expected) class TestParseHStore(unittest.TestCase): """Test the hstore parser.""" test_strings = [ ('', {}), ('=>', ValueError), ('""=>', ValueError), ('=>""', ValueError), ('""=>""', {'': ''}), ('NULL=>NULL', {'NULL': None}), ('null=>null', {'null': None}), ('NULL=>"NULL"', {'NULL': 'NULL'}), ('null=>"null"', {'null': 'null'}), ('k', ValueError), ('k,', ValueError), ('k=', ValueError), ('k=>', ValueError), ('k=>v', {'k': 'v'}), ('k=>v,', ValueError), (' k => v ', {'k': 'v'}), (' k => v ', {'k': 'v'}), ('" k " => " v "', {' k ': ' v '}), ('"k=>v', ValueError), ('k=>"v', ValueError), ('"1-a" => "anything at all"', {'1-a': 'anything at all'}), ('k => v, foo => bar, baz => whatever,' ' "1-a" => "anything at all"', {'k': 'v', 'foo': 'bar', 'baz': 'whatever', '1-a': 'anything at all'}), ('"Hello, World!"=>"Hi!"', {'Hello, World!': 'Hi!'}), ('"Hi!"=>"Hello, World!"', {'Hi!': 'Hello, World!'}), (r'"k=>v"=>k\=\>v', {'k=>v': 'k=>v'}), (r'k\=\>v=>"k=>v"', {'k=>v': 'k=>v'}), ('a\\,b=>a,b=>a', {'a,b': 'a', 'b': 'a'})] def testParser(self): f = pg.cast_hstore self.assertRaises(TypeError, f) self.assertRaises(TypeError, f, None) self.assertRaises(TypeError, f, 42) self.assertRaises(TypeError, f, '', None) for string, expected in self.test_strings: if expected is ValueError: self.assertRaises(ValueError, f, string) else: self.assertEqual(f(string), expected) class TestCastInterval(unittest.TestCase): """Test the interval typecast function.""" intervals = [ ((0, 0, 0, 1, 0, 0, 0), ('1:00:00', '01:00:00', '@ 1 hour', 'PT1H')), ((0, 0, 0, -1, 0, 0, 0), ('-1:00:00', '-01:00:00', '@ -1 hour', 'PT-1H')), ((0, 0, 0, 1, 0, 0, 0), ('0-0 0 1:00:00', '0 years 0 mons 0 days 01:00:00', '@ 0 years 0 mons 0 days 1 hour', 'P0Y0M0DT1H')), ((0, 0, 0, -1, 0, 0, 0), ('-0-0 -1:00:00', '0 years 0 mons 0 days -01:00:00', '@ 0 years 0 mons 0 days -1 hour', 'P0Y0M0DT-1H')), ((0, 0, 1, 
0, 0, 0, 0), ('1 0:00:00', '1 day', '@ 1 day', 'P1D')), ((0, 0, -1, 0, 0, 0, 0), ('-1 0:00:00', '-1 day', '@ -1 day', 'P-1D')), ((0, 1, 0, 0, 0, 0, 0), ('0-1', '1 mon', '@ 1 mon', 'P1M')), ((1, 0, 0, 0, 0, 0, 0), ('1-0', '1 year', '@ 1 year', 'P1Y')), ((0, 0, 0, 2, 0, 0, 0), ('2:00:00', '02:00:00', '@ 2 hours', 'PT2H')), ((0, 0, 2, 0, 0, 0, 0), ('2 0:00:00', '2 days', '@ 2 days', 'P2D')), ((0, 2, 0, 0, 0, 0, 0), ('0-2', '2 mons', '@ 2 mons', 'P2M')), ((2, 0, 0, 0, 0, 0, 0), ('2-0', '2 years', '@ 2 years', 'P2Y')), ((0, 0, 0, -3, 0, 0, 0), ('-3:00:00', '-03:00:00', '@ 3 hours ago', 'PT-3H')), ((0, 0, -3, 0, 0, 0, 0), ('-3 0:00:00', '-3 days', '@ 3 days ago', 'P-3D')), ((0, -3, 0, 0, 0, 0, 0), ('-0-3', '-3 mons', '@ 3 mons ago', 'P-3M')), ((-3, 0, 0, 0, 0, 0, 0), ('-3-0', '-3 years', '@ 3 years ago', 'P-3Y')), ((0, 0, 0, 0, 1, 0, 0), ('0:01:00', '00:01:00', '@ 1 min', 'PT1M')), ((0, 0, 0, 0, 0, 1, 0), ('0:00:01', '00:00:01', '@ 1 sec', 'PT1S')), ((0, 0, 0, 0, 0, 0, 1), ('0:00:00.000001', '00:00:00.000001', '@ 0.000001 secs', 'PT0.000001S')), ((0, 0, 0, 0, 2, 0, 0), ('0:02:00', '00:02:00', '@ 2 mins', 'PT2M')), ((0, 0, 0, 0, 0, 2, 0), ('0:00:02', '00:00:02', '@ 2 secs', 'PT2S')), ((0, 0, 0, 0, 0, 0, 2), ('0:00:00.000002', '00:00:00.000002', '@ 0.000002 secs', 'PT0.000002S')), ((0, 0, 0, 0, -3, 0, 0), ('-0:03:00', '-00:03:00', '@ 3 mins ago', 'PT-3M')), ((0, 0, 0, 0, 0, -3, 0), ('-0:00:03', '-00:00:03', '@ 3 secs ago', 'PT-3S')), ((0, 0, 0, 0, 0, 0, -3), ('-0:00:00.000003', '-00:00:00.000003', '@ 0.000003 secs ago', 'PT-0.000003S')), ((1, 2, 0, 0, 0, 0, 0), ('1-2', '1 year 2 mons', '@ 1 year 2 mons', 'P1Y2M')), ((0, 0, 3, 4, 5, 6, 0), ('3 4:05:06', '3 days 04:05:06', '@ 3 days 4 hours 5 mins 6 secs', 'P3DT4H5M6S')), ((1, 2, 3, 4, 5, 6, 0), ('+1-2 +3 +4:05:06', '1 year 2 mons 3 days 04:05:06', '@ 1 year 2 mons 3 days 4 hours 5 mins 6 secs', 'P1Y2M3DT4H5M6S')), ((1, 2, 3, -4, -5, -6, 0), ('+1-2 +3 -4:05:06', '1 year 2 mons 3 days -04:05:06', '@ 1 year 2 mons 3 days -4 hours -5 mins -6 secs', 'P1Y2M3DT-4H-5M-6S')), ((1, 2, 3, -4, 5, 6, 0), ('+1-2 +3 -3:54:54', '1 year 2 mons 3 days -03:54:54', '@ 1 year 2 mons 3 days -3 hours -54 mins -54 secs', 'P1Y2M3DT-3H-54M-54S')), ((-1, -2, 3, -4, -5, -6, 0), ('-1-2 +3 -4:05:06', '-1 years -2 mons +3 days -04:05:06', '@ 1 year 2 mons -3 days 4 hours 5 mins 6 secs ago', 'P-1Y-2M3DT-4H-5M-6S')), ((1, 2, -3, 4, 5, 6, 0), ('+1-2 -3 +4:05:06', '1 year 2 mons -3 days +04:05:06', '@ 1 year 2 mons -3 days 4 hours 5 mins 6 secs', 'P1Y2M-3DT4H5M6S')), ((0, 0, 0, 1, 30, 0, 0), ('1:30:00', '01:30:00', '@ 1 hour 30 mins', 'PT1H30M')), ((0, 0, 0, 3, 15, 45, 123456), ('3:15:45.123456', '03:15:45.123456', '@ 3 hours 15 mins 45.123456 secs', 'PT3H15M45.123456S')), ((0, 0, 0, 3, 15, -5, 123), ('3:14:55.000123', '03:14:55.000123', '@ 3 hours 14 mins 55.000123 secs', 'PT3H14M55.000123S')), ((0, 0, 0, 3, -5, 15, -12345), ('2:55:14.987655', '02:55:14.987655', '@ 2 hours 55 mins 14.987655 secs', 'PT2H55M14.987655S')), ((0, 0, 0, 2, -1, 0, 0), ('1:59:00', '01:59:00', '@ 1 hour 59 mins', 'PT1H59M')), ((0, 0, 0, -1, 2, 0, 0), ('-0:58:00', '-00:58:00', '@ 58 mins ago', 'PT-58M')), ((1, 11, 0, 0, 0, 0, 0), ('1-11', '1 year 11 mons', '@ 1 year 11 mons', 'P1Y11M')), ((0, -10, 0, 0, 0, 0, 0), ('-0-10', '-10 mons', '@ 10 mons ago', 'P-10M')), ((0, 0, 2, -1, 0, 0, 0), ('+0-0 +2 -1:00:00', '2 days -01:00:00', '@ 2 days -1 hours', 'P2DT-1H')), ((0, 0, -1, 2, 0, 0, 0), ('+0-0 -1 +2:00:00', '-1 days +02:00:00', '@ 1 day -2 hours ago', 'P-1DT2H')), ((0, 0, 1, 0, 0, 0, 1), ('1 0:00:00.000001', 
'1 day 00:00:00.000001', '@ 1 day 0.000001 secs', 'P1DT0.000001S')), ((0, 0, 1, 0, 0, 1, 0), ('1 0:00:01', '1 day 00:00:01', '@ 1 day 1 sec', 'P1DT1S')), ((0, 0, 1, 0, 1, 0, 0), ('1 0:01:00', '1 day 00:01:00', '@ 1 day 1 min', 'P1DT1M')), ((0, 0, 0, 0, 1, 0, -1), ('0:00:59.999999', '00:00:59.999999', '@ 59.999999 secs', 'PT59.999999S')), ((0, 0, 0, 0, -1, 0, 1), ('-0:00:59.999999', '-00:00:59.999999', '@ 59.999999 secs ago', 'PT-59.999999S')), ((0, 0, 0, 0, -1, 1, 1), ('-0:00:58.999999', '-00:00:58.999999', '@ 58.999999 secs ago', 'PT-58.999999S')), ((0, 0, 42, 0, 0, 0, 0), ('42 0:00:00', '42 days', '@ 42 days', 'P42D')), ((0, 0, -7, 0, 0, 0, 0), ('-7 0:00:00', '-7 days', '@ 7 days ago', 'P-7D')), ((1, 1, 1, 1, 1, 0, 0), ('+1-1 +1 +1:01:00', '1 year 1 mon 1 day 01:01:00', '@ 1 year 1 mon 1 day 1 hour 1 min', 'P1Y1M1DT1H1M')), ((0, -11, -1, -1, 1, 0, 0), ('-0-11 -1 -0:59:00', '-11 mons -1 days -00:59:00', '@ 11 mons 1 day 59 mins ago', 'P-11M-1DT-59M')), ((-1, -1, -1, -1, -1, 0, 0), ('-1-1 -1 -1:01:00', '-1 years -1 mons -1 days -01:01:00', '@ 1 year 1 mon 1 day 1 hour 1 min ago', 'P-1Y-1M-1DT-1H-1M')), ((-1, 0, -3, 1, 0, 0, 0), ('-1-0 -3 +1:00:00', '-1 years -3 days +01:00:00', '@ 1 year 3 days -1 hours ago', 'P-1Y-3DT1H')), ((1, 0, 0, 0, 0, 0, 1), ('+1-0 +0 +0:00:00.000001', '1 year 00:00:00.000001', '@ 1 year 0.000001 secs', 'P1YT0.000001S')), ((1, 0, 0, 0, 0, 0, -1), ('+1-0 +0 -0:00:00.000001', '1 year -00:00:00.000001', '@ 1 year -0.000001 secs', 'P1YT-0.000001S')), ((1, 2, 3, 4, 5, 6, 7), ('+1-2 +3 +4:05:06.000007', '1 year 2 mons 3 days 04:05:06.000007', '@ 1 year 2 mons 3 days 4 hours 5 mins 6.000007 secs', 'P1Y2M3DT4H5M6.000007S')), ((0, 10, 3, -4, 5, -6, 7), ('+0-10 +3 -3:55:05.999993', '10 mons 3 days -03:55:05.999993', '@ 10 mons 3 days -3 hours -55 mins -5.999993 secs', 'P10M3DT-3H-55M-5.999993S')), ((0, -10, -3, 4, -5, 6, -7), ('-0-10 -3 +3:55:05.999993', '-10 mons -3 days +03:55:05.999993', '@ 10 mons 3 days -3 hours -55 mins -5.999993 secs ago', 'P-10M-3DT3H55M5.999993S'))] def testCastInterval(self): for result, values in self.intervals: f = pg.cast_interval years, mons, days, hours, mins, secs, usecs = result days += 365 * years + 30 * mons interval = timedelta(days=days, hours=hours, minutes=mins, seconds=secs, microseconds=usecs) for value in values: self.assertEqual(f(value), interval) class TestEscapeFunctions(unittest.TestCase): """Test pg escape and unescape functions. The libpq interface memorizes some parameters of the last opened connection that influence the result of these functions. Therefore we cannot do rigid tests of these functions here. We leave this for the test module that runs with a database. 
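For example, how escape_string treats backslashes can depend on the standard_conforming_strings setting of that connection, so only results that are independent of it are checked below.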
""" def testEscapeString(self): f = pg.escape_string r = f(b'plain') self.assertIsInstance(r, bytes) self.assertEqual(r, b'plain') r = f(u'plain') self.assertIsInstance(r, unicode) self.assertEqual(r, u'plain') r = f("that's cheese") self.assertIsInstance(r, str) self.assertEqual(r, "that''s cheese") def testEscapeBytea(self): f = pg.escape_bytea r = f(b'plain') self.assertIsInstance(r, bytes) self.assertEqual(r, b'plain') r = f(u'plain') self.assertIsInstance(r, unicode) self.assertEqual(r, u'plain') r = f("that's cheese") self.assertIsInstance(r, str) self.assertEqual(r, "that''s cheese") def testUnescapeBytea(self): f = pg.unescape_bytea r = f(b'plain') self.assertIsInstance(r, bytes) self.assertEqual(r, b'plain') r = f(u'plain') self.assertIsInstance(r, bytes) self.assertEqual(r, b'plain') r = f(b"das is' k\\303\\244se") self.assertIsInstance(r, bytes) self.assertEqual(r, u"das is' käse".encode('utf-8')) r = f(u"das is' k\\303\\244se") self.assertIsInstance(r, bytes) self.assertEqual(r, u"das is' käse".encode('utf-8')) r = f(b'O\\000ps\\377!') self.assertEqual(r, b'O\x00ps\xff!') r = f(u'O\\000ps\\377!') self.assertEqual(r, b'O\x00ps\xff!') class TestConfigFunctions(unittest.TestCase): """Test the functions for changing default settings. The effect of most of these cannot be tested here, because that needs a database connection. So we merely test their existence here. """ def testGetDatestyle(self): self.assertIsNone(pg.get_datestyle()) def testGetDatestyle(self): datestyle = pg.get_datestyle() try: pg.set_datestyle('ISO, YMD') self.assertEqual(pg.get_datestyle(), 'ISO, YMD') pg.set_datestyle('Postgres, MDY') self.assertEqual(pg.get_datestyle(), 'Postgres, MDY') pg.set_datestyle('Postgres, DMY') self.assertEqual(pg.get_datestyle(), 'Postgres, DMY') pg.set_datestyle('SQL, MDY') self.assertEqual(pg.get_datestyle(), 'SQL, MDY') pg.set_datestyle('SQL, DMY') self.assertEqual(pg.get_datestyle(), 'SQL, DMY') pg.set_datestyle('German, DMY') self.assertEqual(pg.get_datestyle(), 'German, DMY') pg.set_datestyle(None) self.assertIsNone(pg.get_datestyle()) finally: pg.set_datestyle(datestyle) def testGetDecimalPoint(self): r = pg.get_decimal_point() self.assertIsInstance(r, str) self.assertEqual(r, '.') def testSetDecimalPoint(self): point = pg.get_decimal_point() try: pg.set_decimal_point('*') r = pg.get_decimal_point() self.assertIsInstance(r, str) self.assertEqual(r, '*') finally: pg.set_decimal_point(point) r = pg.get_decimal_point() self.assertIsInstance(r, str) self.assertEqual(r, point) def testGetDecimal(self): r = pg.get_decimal() self.assertIs(r, pg.Decimal) def testSetDecimal(self): decimal_class = pg.Decimal try: pg.set_decimal(int) r = pg.get_decimal() self.assertIs(r, int) finally: pg.set_decimal(decimal_class) r = pg.get_decimal() self.assertIs(r, decimal_class) def testGetBool(self): r = pg.get_bool() self.assertIsInstance(r, bool) self.assertIs(r, True) def testSetBool(self): use_bool = pg.get_bool() try: pg.set_bool(False) r = pg.get_bool() pg.set_bool(use_bool) self.assertIsInstance(r, bool) self.assertIs(r, False) pg.set_bool(True) r = pg.get_bool() self.assertIsInstance(r, bool) self.assertIs(r, True) finally: pg.set_bool(use_bool) r = pg.get_bool() self.assertIsInstance(r, bool) self.assertIs(r, use_bool) def testGetByteaEscaped(self): r = pg.get_bytea_escaped() self.assertIsInstance(r, bool) self.assertIs(r, False) def testSetByteaEscaped(self): bytea_escaped = pg.get_bytea_escaped() try: pg.set_bytea_escaped(True) r = pg.get_bytea_escaped() 
pg.set_bytea_escaped(bytea_escaped) self.assertIsInstance(r, bool) self.assertIs(r, True) pg.set_bytea_escaped(False) r = pg.get_bytea_escaped() self.assertIsInstance(r, bool) self.assertIs(r, False) finally: pg.set_bytea_escaped(bytea_escaped) r = pg.get_bytea_escaped() self.assertIsInstance(r, bool) self.assertIs(r, bytea_escaped) def testGetJsondecode(self): r = pg.get_jsondecode() self.assertTrue(callable(r)) self.assertIs(r, json.loads) def testSetJsondecode(self): jsondecode = pg.get_jsondecode() try: pg.set_jsondecode(None) r = pg.get_jsondecode() self.assertIsNone(r) pg.set_jsondecode(str) r = pg.get_jsondecode() self.assertIs(r, str) self.assertRaises(TypeError, pg.set_jsondecode, 'invalid') finally: pg.set_jsondecode(jsondecode) r = pg.get_jsondecode() self.assertIs(r, jsondecode) class TestModuleConstants(unittest.TestCase): """Test the existence of the documented module constants.""" def testVersion(self): v = pg.version self.assertIsInstance(v, str) # make sure the version conforms to PEP440 re_version = r"""^ (\d[\.\d]*(?<= \d)) ((?:[abc]|rc)\d+)? (?:(\.post\d+))? (?:(\.dev\d+))? (?:(\+(?![.])[a-zA-Z0-9\.]*[a-zA-Z0-9]))? $""" match = re.match(re_version, v, re.X) self.assertIsNotNone(match) self.assertEqual(pg.__version__, v) if __name__ == '__main__': unittest.main() pygresql-5.1.2/tests/test_classic_largeobj.py000077500000000000000000000350501365010227600214360ustar00rootroot00000000000000#!/usr/bin/python # -*- coding: utf-8 -*- """Test the classic PyGreSQL interface. Sub-tests for large object support. Contributed by Christoph Zwerschke. These tests need a database to test against. """ try: import unittest2 as unittest # for Python < 2.7 except ImportError: import unittest import tempfile import os import pg # the module under test # We need a database to test against. If LOCAL_PyGreSQL.py exists we will # get our information from that. Otherwise we use the defaults. 
dbname = 'unittest' dbhost = None dbport = 5432 try: from .LOCAL_PyGreSQL import * except (ImportError, ValueError): try: from LOCAL_PyGreSQL import * except ImportError: pass windows = os.name == 'nt' def connect(): """Create a basic pg connection to the test database.""" connection = pg.connect(dbname, dbhost, dbport) connection.query("set client_min_messages=warning") return connection class TestModuleConstants(unittest.TestCase): """Test the existence of the documented module constants.""" def testLargeObjectIntConstants(self): names = 'INV_READ INV_WRITE SEEK_SET SEEK_CUR SEEK_END'.split() for name in names: try: value = getattr(pg, name) except AttributeError: self.fail('Module constant %s is missing' % name) self.assertIsInstance(value, int) class TestCreatingLargeObjects(unittest.TestCase): """Test creating large objects using a connection.""" def setUp(self): self.c = connect() self.c.query('begin') def tearDown(self): self.c.query('rollback') self.c.close() def assertIsLargeObject(self, obj): self.assertIsNotNone(obj) self.assertTrue(hasattr(obj, 'open')) self.assertTrue(hasattr(obj, 'close')) self.assertTrue(hasattr(obj, 'oid')) self.assertTrue(hasattr(obj, 'pgcnx')) self.assertTrue(hasattr(obj, 'error')) self.assertIsInstance(obj.oid, int) self.assertNotEqual(obj.oid, 0) self.assertIs(obj.pgcnx, self.c) self.assertIsInstance(obj.error, str) self.assertFalse(obj.error) def testLoCreate(self): large_object = self.c.locreate(pg.INV_READ | pg.INV_WRITE) try: self.assertIsLargeObject(large_object) finally: del large_object def testGetLo(self): large_object = self.c.locreate(pg.INV_READ | pg.INV_WRITE) try: self.assertIsLargeObject(large_object) oid = large_object.oid finally: del large_object data = b'some data to be shared' large_object = self.c.getlo(oid) try: self.assertIsLargeObject(large_object) self.assertEqual(large_object.oid, oid) large_object.open(pg.INV_WRITE) large_object.write(data) large_object.close() finally: del large_object large_object = self.c.getlo(oid) try: self.assertIsLargeObject(large_object) self.assertEqual(large_object.oid, oid) large_object.open(pg.INV_READ) r = large_object.read(80) large_object.close() large_object.unlink() finally: del large_object self.assertIsInstance(r, bytes) self.assertEqual(r, data) def testLoImport(self): if windows: # NamedTemporaryFiles don't work well here fname = 'temp_test_pg_largeobj_import.txt' f = open(fname, 'wb') else: f = tempfile.NamedTemporaryFile() fname = f.name data = b'some data to be imported' f.write(data) if windows: f.close() f = open(fname, 'rb') else: f.flush() f.seek(0) large_object = self.c.loimport(f.name) try: f.close() if windows: os.remove(fname) self.assertIsLargeObject(large_object) large_object.open(pg.INV_READ) large_object.seek(0, pg.SEEK_SET) r = large_object.size() self.assertIsInstance(r, int) self.assertEqual(r, len(data)) r = large_object.read(80) self.assertIsInstance(r, bytes) self.assertEqual(r, data) large_object.close() large_object.unlink() finally: del large_object class TestLargeObjects(unittest.TestCase): """Test the large object methods.""" def setUp(self): self.pgcnx = connect() self.pgcnx.query('begin') self.obj = self.pgcnx.locreate(pg.INV_READ | pg.INV_WRITE) def tearDown(self): if self.obj.oid: try: self.obj.close() except (SystemError, IOError): pass try: self.obj.unlink() except (SystemError, IOError): pass del self.obj try: self.pgcnx.query('rollback') except SystemError: pass self.pgcnx.close() def testClassName(self): self.assertEqual(self.obj.__class__.__name__, 
'LargeObject') def testModuleName(self): self.assertEqual(self.obj.__class__.__module__, 'pg') def testOid(self): self.assertIsInstance(self.obj.oid, int) self.assertNotEqual(self.obj.oid, 0) def testPgcn(self): self.assertIs(self.obj.pgcnx, self.pgcnx) def testError(self): self.assertIsInstance(self.obj.error, str) self.assertEqual(self.obj.error, '') def testStr(self): self.obj.open(pg.INV_WRITE) data = b'some object to be printed' self.obj.write(data) oid = self.obj.oid r = str(self.obj) self.assertEqual(r, 'Opened large object, oid %d' % oid) self.obj.close() r = str(self.obj) self.assertEqual(r, 'Closed large object, oid %d' % oid) def testRepr(self): r = repr(self.obj) self.assertTrue(r.startswith('<pg.LargeObject')) >= len(self.sent): return True sleep(0.01) def receive(self, stop=False): if not self.sent: stop = True if stop: self.notify_handler(stop=True, payload='stop') self.assertTrue(self.wait()) self.assertFalse(self.timeout) self.assertEqual(self.received, self.sent) self.received = [] self.sent = [] self.assertEqual(self.handler.listening, not self.stopped) def testNotifyHandlerEmpty(self): self.start_handler() self.notify_handler(stop=True) self.assertEqual(len(self.sent), 1) self.receive() def testNotifyQueryEmpty(self): self.start_handler() self.notify_query(stop=True) self.assertEqual(len(self.sent), 1) self.receive() def testNotifyHandlerOnce(self): self.start_handler() self.notify_handler() self.assertEqual(len(self.sent), 1) self.receive() self.receive(stop=True) def testNotifyQueryOnce(self): self.start_handler() self.notify_query() self.receive() self.notify_query(stop=True) self.receive() def testNotifyWithArgs(self): arg_dict = {'test': 42, 'more': 43, 'less': 41} self.start_handler('test_args', arg_dict) self.notify_query() self.receive(stop=True) def testNotifySeveralTimes(self): arg_dict = {'test': 1} self.start_handler(arg_dict=arg_dict) for count in range(3): self.notify_query() self.receive() arg_dict['test'] += 1 for count in range(2): self.notify_handler() self.receive() arg_dict['test'] += 1 for count in range(3): self.notify_query() self.receive(stop=True) def testNotifyOnceWithPayload(self): self.start_handler() self.notify_query(payload='test_payload') self.receive(stop=True) def testNotifyWithArgsAndPayload(self): self.start_handler(arg_dict={'foo': 'bar'}) self.notify_query(payload='baz') self.receive(stop=True) def testNotifyQuotedNames(self): self.start_handler('Hello, World!') self.notify_query(payload='How do you do?') self.receive(stop=True) def testNotifyWithFivePayloads(self): self.start_handler('gimme_5', {'test': 'Gimme 5'}) for count in range(5): self.notify_query(payload="Round %d" % count) self.assertEqual(len(self.sent), 5) self.receive(stop=True) def testReceiveImmediately(self): self.start_handler('immediate', {'test': 'immediate'}) for count in range(3): self.notify_query(payload="Round %d" % count) self.receive() self.receive(stop=True) def testNotifyDistinctInTransaction(self): self.start_handler('test_transaction', {'transaction': True}) self.db.begin() for count in range(3): self.notify_query(payload='Round %d' % count) self.db.commit() self.receive(stop=True) def testNotifySameInTransaction(self): self.start_handler('test_transaction', {'transaction': True}) self.db.begin() for count in range(3): self.notify_query() self.db.commit() # these same notifications may be delivered as one, # so we must not wait for all three to appear self.sent = self.sent[:1] self.receive(stop=True) def testNotifyNoTimeout(self): self.start_handler(timeout=None)
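# timeout=None makes the handler poll for notifications indefinitely,
# so self.timeout must stay False below. A rough sketch of using the
# handler directly (assuming a pg.DB instance db and a callback cb;
# the event name 'my_event' is made up for illustration):
#     handler = pg.NotificationHandler(db, 'my_event', cb, timeout=None)
#     db.query("notify my_event")
#     handler()  # listens and dispatches to cb until stopped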
self.assertIsNone(self.handler.timeout) self.assertTrue(self.handler.listening) sleep(0.02) self.assertFalse(self.timeout) self.receive(stop=True) def testNotifyZeroTimeout(self): self.start_handler(timeout=0) self.assertEqual(self.handler.timeout, 0) self.assertTrue(self.handler.listening) self.assertFalse(self.timeout) def testNotifyWithoutTimeout(self): self.start_handler(timeout=1) self.assertEqual(self.handler.timeout, 1) sleep(0.02) self.assertFalse(self.timeout) self.receive(stop=True) def testNotifyWithTimeout(self): self.start_handler(timeout=0.01) sleep(0.02) self.assertTrue(self.timeout) if __name__ == '__main__': unittest.main() pygresql-5.1.2/tests/test_dbapi20.py000077500000000000000000001555751365010227600174100ustar00rootroot00000000000000#!/usr/bin/python # -*- coding: utf-8 -*- try: import unittest2 as unittest # for Python < 2.7 except ImportError: import unittest import pgdb try: from . import dbapi20 except (ImportError, ValueError, SystemError): import dbapi20 # We need a database to test against. # If LOCAL_PyGreSQL.py exists we will get our information from that. # Otherwise we use the defaults. dbname = 'dbapi20_test' dbhost = '' dbport = 5432 try: from .LOCAL_PyGreSQL import * except (ImportError, ValueError): try: from LOCAL_PyGreSQL import * except ImportError: pass import gc import sys from datetime import date, time, datetime, timedelta from uuid import UUID as Uuid try: # noinspection PyUnresolvedReferences long except NameError: # Python >= 3.0 long = int try: from collections import OrderedDict except ImportError: # Python 2.6 or 3.0 OrderedDict = None class PgBitString: """Test object with a PostgreSQL representation as Bit String.""" def __init__(self, value): self.value = value def __pg_repr__(self): return "B'{0:b}'".format(self.value) class test_PyGreSQL(dbapi20.DatabaseAPI20Test): driver = pgdb connect_args = () connect_kw_args = {'database': dbname, 'host': '%s:%d' % (dbhost or '', dbport or -1)} lower_func = 'lower' # For stored procedure test def setUp(self): # Call superclass setUp in case this does something in the future dbapi20.DatabaseAPI20Test.setUp(self) try: con = self._connect() con.close() except pgdb.Error: # try to create a missing database import pg try: # first try to log in as superuser db = pg.DB('postgres', dbhost or None, dbport or -1, user='postgres') except Exception: # then try to log in as current user db = pg.DB('postgres', dbhost or None, dbport or -1) db.query('create database ' + dbname) def tearDown(self): dbapi20.DatabaseAPI20Test.tearDown(self) def test_version(self): v = pgdb.version self.assertIsInstance(v, str) self.assertIn('.', v) self.assertEqual(pgdb.__version__, v) def test_connect_kwargs(self): application_name = 'PyGreSQL DB API 2.0 Test' self.connect_kw_args['application_name'] = application_name con = self._connect() cur = con.cursor() cur.execute("select application_name from pg_stat_activity" " where application_name = %s", (application_name,)) self.assertEqual(cur.fetchone(), (application_name,)) def test_percent_sign(self): con = self._connect() cur = con.cursor() cur.execute("select %s, 'a %% sign'", ('a % sign',)) self.assertEqual(cur.fetchone(), ('a % sign', 'a % sign')) cur.execute("select 'a % sign'") self.assertEqual(cur.fetchone(), ('a % sign',)) cur.execute("select 'a %% sign'") self.assertEqual(cur.fetchone(), ('a % sign',)) def test_callproc_no_params(self): con = self._connect() cur = con.cursor() # note that now() does not change within a transaction cur.execute('select now()') now = 
cur.fetchone()[0] res = cur.callproc('now') self.assertIsNone(res) res = cur.fetchone()[0] self.assertEqual(res, now) def test_callproc_bad_params(self): con = self._connect() cur = con.cursor() self.assertRaises(TypeError, cur.callproc, 'lower', 42) self.assertRaises(pgdb.ProgrammingError, cur.callproc, 'lower', (42,)) def test_callproc_one_param(self): con = self._connect() cur = con.cursor() params = (42.4382,) res = cur.callproc("round", params) self.assertIs(res, params) res = cur.fetchone()[0] self.assertEqual(res, 42) def test_callproc_two_params(self): con = self._connect() cur = con.cursor() params = (9, 4) res = cur.callproc("div", params) self.assertIs(res, params) res = cur.fetchone()[0] self.assertEqual(res, 2) def test_cursor_type(self): class TestCursor(pgdb.Cursor): pass con = self._connect() self.assertIs(con.cursor_type, pgdb.Cursor) cur = con.cursor() self.assertIsInstance(cur, pgdb.Cursor) self.assertNotIsInstance(cur, TestCursor) con.cursor_type = TestCursor cur = con.cursor() self.assertIsInstance(cur, TestCursor) cur = con.cursor() self.assertIsInstance(cur, TestCursor) con = self._connect() self.assertIs(con.cursor_type, pgdb.Cursor) cur = con.cursor() self.assertIsInstance(cur, pgdb.Cursor) self.assertNotIsInstance(cur, TestCursor) def test_row_factory(self): class TestCursor(pgdb.Cursor): def row_factory(self, row): return dict(('column %s' % desc[0], value) for desc, value in zip(self.description, row)) con = self._connect() con.cursor_type = TestCursor cur = con.cursor() self.assertIsInstance(cur, TestCursor) res = cur.execute("select 1 as a, 2 as b") self.assertIs(res, cur, 'execute() should return cursor') res = cur.fetchone() self.assertIsInstance(res, dict) self.assertEqual(res, {'column a': 1, 'column b': 2}) cur.execute("select 1 as a, 2 as b union select 3, 4 order by 1") res = cur.fetchall() self.assertIsInstance(res, list) self.assertEqual(len(res), 2) self.assertIsInstance(res[0], dict) self.assertEqual(res[0], {'column a': 1, 'column b': 2}) self.assertIsInstance(res[1], dict) self.assertEqual(res[1], {'column a': 3, 'column b': 4}) def test_build_row_factory(self): class TestCursor(pgdb.Cursor): def build_row_factory(self): keys = [desc[0] for desc in self.description] return lambda row: dict((key, value) for key, value in zip(keys, row)) con = self._connect() con.cursor_type = TestCursor cur = con.cursor() self.assertIsInstance(cur, TestCursor) cur.execute("select 1 as a, 2 as b") res = cur.fetchone() self.assertIsInstance(res, dict) self.assertEqual(res, {'a': 1, 'b': 2}) cur.execute("select 1 as a, 2 as b union select 3, 4 order by 1") res = cur.fetchall() self.assertIsInstance(res, list) self.assertEqual(len(res), 2) self.assertIsInstance(res[0], dict) self.assertEqual(res[0], {'a': 1, 'b': 2}) self.assertIsInstance(res[1], dict) self.assertEqual(res[1], {'a': 3, 'b': 4}) def test_cursor_with_named_columns(self): con = self._connect() cur = con.cursor() res = cur.execute("select 1 as abc, 2 as de, 3 as f") self.assertIs(res, cur, 'execute() should return cursor') res = cur.fetchone() self.assertIsInstance(res, tuple) self.assertEqual(res, (1, 2, 3)) self.assertEqual(res._fields, ('abc', 'de', 'f')) self.assertEqual(res.abc, 1) self.assertEqual(res.de, 2) self.assertEqual(res.f, 3) cur.execute("select 1 as one, 2 as two union select 3, 4 order by 1") res = cur.fetchall() self.assertIsInstance(res, list) self.assertEqual(len(res), 2) self.assertIsInstance(res[0], tuple) self.assertEqual(res[0], (1, 2)) self.assertEqual(res[0]._fields, ('one', 
'two')) self.assertIsInstance(res[1], tuple) self.assertEqual(res[1], (3, 4)) self.assertEqual(res[1]._fields, ('one', 'two')) def test_cursor_with_unnamed_columns(self): con = self._connect() cur = con.cursor() cur.execute("select 1, 2, 3") res = cur.fetchone() self.assertIsInstance(res, tuple) self.assertEqual(res, (1, 2, 3)) old_py = OrderedDict is None # Python 2.6 or 3.0 # old Python versions cannot rename tuple fields with underscore if old_py: self.assertEqual(res._fields, ('column_0', 'column_1', 'column_2')) else: self.assertEqual(res._fields, ('_0', '_1', '_2')) cur.execute("select 1 as one, 2, 3 as three") res = cur.fetchone() self.assertIsInstance(res, tuple) self.assertEqual(res, (1, 2, 3)) if old_py: # cannot auto rename with underscore self.assertEqual(res._fields, ('one', 'column_1', 'three')) else: self.assertEqual(res._fields, ('one', '_1', 'three')) def test_cursor_with_badly_named_columns(self): con = self._connect() cur = con.cursor() cur.execute("select 1 as abc, 2 as def") res = cur.fetchone() self.assertIsInstance(res, tuple) self.assertEqual(res, (1, 2)) old_py = OrderedDict is None # Python 2.6 or 3.0 if old_py: self.assertEqual(res._fields, ('abc', 'column_1')) else: self.assertEqual(res._fields, ('abc', '_1')) cur.execute('select 1 as snake_case, 2 as "CamelCase",' ' 3 as "kebap-case", 4 as "_bad", 5 as "0bad", 6 as "bad$"') res = cur.fetchone() self.assertIsInstance(res, tuple) self.assertEqual(res, (1, 2, 3, 4, 5, 6)) # old Python versions cannot rename tuple fields with underscore self.assertEqual(res._fields[:2], ('snake_case', 'CamelCase')) fields = ('_2', '_3', '_4', '_5') if old_py: fields = tuple('column' + field for field in fields) self.assertEqual(res._fields[2:], fields) def test_colnames(self): con = self._connect() cur = con.cursor() cur.execute("select 1, 2, 3") names = cur.colnames self.assertIsInstance(names, list) self.assertEqual(names, ['?column?', '?column?', '?column?']) cur.execute("select 1 as a, 2 as bc, 3 as def, 4 as g") names = cur.colnames self.assertIsInstance(names, list) self.assertEqual(names, ['a', 'bc', 'def', 'g']) def test_coltypes(self): con = self._connect() cur = con.cursor() cur.execute("select 1::int2, 2::int4, 3::int8") types = cur.coltypes self.assertIsInstance(types, list) self.assertEqual(types, ['int2', 'int4', 'int8']) def test_description_fields(self): con = self._connect() cur = con.cursor() cur.execute("select 123456789::int8 col0," " 123456.789::numeric(41, 13) as col1," " 'foobar'::char(39) as col2") desc = cur.description self.assertIsInstance(desc, list) self.assertEqual(len(desc), 3) cols = [('int8', 8, None), ('numeric', 41, 13), ('bpchar', 39, None)] for i in range(3): c, d = cols[i], desc[i] self.assertIsInstance(d, tuple) self.assertEqual(len(d), 7) self.assertIsInstance(d.name, str) self.assertEqual(d.name, 'col%d' % i) self.assertIsInstance(d.type_code, str) self.assertEqual(d.type_code, c[0]) self.assertIsNone(d.display_size) self.assertIsInstance(d.internal_size, int) self.assertEqual(d.internal_size, c[1]) if c[2] is not None: self.assertIsInstance(d.precision, int) self.assertEqual(d.precision, c[1]) self.assertIsInstance(d.scale, int) self.assertEqual(d.scale, c[2]) else: self.assertIsNone(d.precision) self.assertIsNone(d.scale) self.assertIsNone(d.null_ok) def test_type_cache_info(self): con = self._connect() try: cur = con.cursor() type_cache = con.type_cache self.assertNotIn('numeric', type_cache) type_info = type_cache['numeric'] self.assertIn('numeric', type_cache) 
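# the first lookup above fills the per-connection type cache lazily;
# the cached TypeInfo compares equal to its type name, but also carries
# the oid, length, type and category metadata checked below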
self.assertEqual(type_info, 'numeric') self.assertEqual(type_info.oid, 1700) self.assertEqual(type_info.len, -1) self.assertEqual(type_info.type, 'b') # base self.assertEqual(type_info.category, 'N') # numeric self.assertEqual(type_info.delim, ',') self.assertEqual(type_info.relid, 0) self.assertIs(con.type_cache[1700], type_info) self.assertNotIn('pg_type', type_cache) type_info = type_cache['pg_type'] self.assertIn('pg_type', type_cache) self.assertEqual(type_info.type, 'c') # composite self.assertEqual(type_info.category, 'C') # composite cols = type_cache.get_fields('pg_type') if cols[0].name == 'oid': # PostgreSQL < 12 del cols[0] self.assertEqual(cols[0].name, 'typname') typname = type_cache[cols[0].type] self.assertEqual(typname, 'name') self.assertEqual(typname.type, 'b') # base self.assertEqual(typname.category, 'S') # string self.assertEqual(cols[3].name, 'typlen') typlen = type_cache[cols[3].type] self.assertEqual(typlen, 'int2') self.assertEqual(typlen.type, 'b') # base self.assertEqual(typlen.category, 'N') # numeric cur.close() cur = con.cursor() type_cache = con.type_cache self.assertIn('numeric', type_cache) cur.close() finally: con.close() con = self._connect() try: cur = con.cursor() type_cache = con.type_cache self.assertNotIn('pg_type', type_cache) self.assertEqual(type_cache.get('pg_type'), type_info) self.assertIn('pg_type', type_cache) self.assertIsNone(type_cache.get( self.table_prefix + '_surely_does_not_exist')) cur.close() finally: con.close() def test_type_cache_typecast(self): con = self._connect() try: cur = con.cursor() type_cache = con.type_cache self.assertIs(type_cache.get_typecast('int4'), int) cast_int = lambda v: 'int(%s)' % v type_cache.set_typecast('int4', cast_int) query = 'select 2::int2, 4::int4, 8::int8' cur.execute(query) i2, i4, i8 = cur.fetchone() self.assertEqual(i2, 2) self.assertEqual(i4, 'int(4)') self.assertEqual(i8, 8) self.assertEqual(type_cache.typecast(42, 'int4'), 'int(42)') type_cache.set_typecast(['int2', 'int8'], cast_int) cur.execute(query) i2, i4, i8 = cur.fetchone() self.assertEqual(i2, 'int(2)') self.assertEqual(i4, 'int(4)') self.assertEqual(i8, 'int(8)') type_cache.reset_typecast('int4') cur.execute(query) i2, i4, i8 = cur.fetchone() self.assertEqual(i2, 'int(2)') self.assertEqual(i4, 4) self.assertEqual(i8, 'int(8)') type_cache.reset_typecast(['int2', 'int8']) cur.execute(query) i2, i4, i8 = cur.fetchone() self.assertEqual(i2, 2) self.assertEqual(i4, 4) self.assertEqual(i8, 8) type_cache.set_typecast(['int2', 'int8'], cast_int) cur.execute(query) i2, i4, i8 = cur.fetchone() self.assertEqual(i2, 'int(2)') self.assertEqual(i4, 4) self.assertEqual(i8, 'int(8)') type_cache.reset_typecast() cur.execute(query) i2, i4, i8 = cur.fetchone() self.assertEqual(i2, 2) self.assertEqual(i4, 4) self.assertEqual(i8, 8) cur.close() finally: con.close() def test_cursor_iteration(self): con = self._connect() cur = con.cursor() cur.execute("select 1 union select 2 union select 3 order by 1") self.assertEqual([r[0] for r in cur], [1, 2, 3]) def test_cursor_invalidation(self): con = self._connect() cur = con.cursor() cur.execute("select 1 union select 2") self.assertEqual(cur.fetchone(), (1,)) self.assertFalse(con.closed) con.close() self.assertTrue(con.closed) self.assertRaises(pgdb.OperationalError, cur.fetchone) def test_fetch_2_rows(self): Decimal = pgdb.decimal_type() values = ('test', pgdb.Binary(b'\xff\x52\xb2'), True, 5, 6, 5.7, Decimal('234.234234'), Decimal('75.45'), pgdb.Date(2011, 7, 17), pgdb.Time(15, 47, 42), pgdb.Timestamp(2008, 10, 
20, 15, 25, 35), pgdb.Interval(15, 31, 5), 7897234) table = self.table_prefix + 'booze' con = self._connect() try: cur = con.cursor() cur.execute("set datestyle to iso") cur.execute("create table %s (" "stringtest varchar," "binarytest bytea," "booltest bool," "integertest int4," "longtest int8," "floattest float8," "numerictest numeric," "moneytest money," "datetest date," "timetest time," "datetimetest timestamp," "intervaltest interval," "rowidtest oid)" % table) cur.execute("set standard_conforming_strings to on") for s in ('numeric', 'monetary', 'time'): cur.execute("set lc_%s to 'C'" % s) for _i in range(2): cur.execute("insert into %s values (" "%%s,%%s,%%s,%%s,%%s,%%s,%%s," "'%%s'::money,%%s,%%s,%%s,%%s,%%s)" % table, values) cur.execute("select * from %s" % table) rows = cur.fetchall() self.assertEqual(len(rows), 2) row0 = rows[0] self.assertEqual(row0, values) self.assertEqual(row0, rows[1]) self.assertIsInstance(row0[0], str) self.assertIsInstance(row0[1], bytes) self.assertIsInstance(row0[2], bool) self.assertIsInstance(row0[3], int) self.assertIsInstance(row0[4], long) self.assertIsInstance(row0[5], float) self.assertIsInstance(row0[6], Decimal) self.assertIsInstance(row0[7], Decimal) self.assertIsInstance(row0[8], date) self.assertIsInstance(row0[9], time) self.assertIsInstance(row0[10], datetime) self.assertIsInstance(row0[11], timedelta) finally: con.close() def test_integrity_error(self): table = self.table_prefix + 'booze' con = self._connect() try: cur = con.cursor() cur.execute("set client_min_messages = warning") cur.execute("create table %s (i int primary key)" % table) cur.execute("insert into %s values (1)" % table) cur.execute("insert into %s values (2)" % table) self.assertRaises(pgdb.IntegrityError, cur.execute, "insert into %s values (1)" % table) finally: con.close() def test_update_rowcount(self): table = self.table_prefix + 'booze' con = self._connect() try: cur = con.cursor() cur.execute("create table %s (i int)" % table) cur.execute("insert into %s values (1)" % table) cur.execute("update %s set i=2 where i=2 returning i" % table) self.assertEqual(cur.rowcount, 0) cur.execute("update %s set i=2 where i=1 returning i" % table) self.assertEqual(cur.rowcount, 1) cur.close() # keep rowcount even if cursor is closed (needed by SQLAlchemy) self.assertEqual(cur.rowcount, 1) finally: con.close() def test_sqlstate(self): con = self._connect() cur = con.cursor() try: cur.execute("select 1/0") except pgdb.DatabaseError as error: self.assertTrue(isinstance(error, pgdb.DataError)) # the SQLSTATE error code for division by zero is 22012 self.assertEqual(error.sqlstate, '22012') def test_float(self): nan, inf = float('nan'), float('inf') from math import isnan, isinf self.assertTrue(isnan(nan) and not isinf(nan)) self.assertTrue(isinf(inf) and not isnan(inf)) values = [0, 1, 0.03125, -42.53125, nan, inf, -inf, 'nan', 'inf', '-inf', 'NaN', 'Infinity', '-Infinity'] table = self.table_prefix + 'booze' con = self._connect() try: cur = con.cursor() cur.execute( "create table %s (n smallint, floattest float)" % table) params = enumerate(values) cur.executemany("insert into %s values (%%d,%%s)" % table, params) cur.execute("select floattest from %s order by n" % table) rows = cur.fetchall() self.assertEqual(cur.description[0].type_code, pgdb.FLOAT) self.assertNotEqual(cur.description[0].type_code, pgdb.ARRAY) self.assertNotEqual(cur.description[0].type_code, pgdb.RECORD) finally: con.close() self.assertEqual(len(rows), len(values)) rows = [row[0] for row in rows] for inval, 
outval in zip(values, rows): if inval in ('inf', 'Infinity'): inval = inf elif inval in ('-inf', '-Infinity'): inval = -inf elif inval in ('nan', 'NaN'): inval = nan if isinf(inval): self.assertTrue(isinf(outval)) if inval < 0: self.assertTrue(outval < 0) else: self.assertTrue(outval > 0) elif isnan(inval): self.assertTrue(isnan(outval)) else: self.assertEqual(inval, outval) def test_datetime(self): dt = datetime(2011, 7, 17, 15, 47, 42, 317509) table = self.table_prefix + 'booze' con = self._connect() try: cur = con.cursor() cur.execute("set timezone = UTC") cur.execute("create table %s (" "d date, t time, ts timestamp," "tz timetz, tsz timestamptz)" % table) for n in range(3): values = [dt.date(), dt.time(), dt, dt.time(), dt] values[3] = values[3].replace(tzinfo=pgdb.timezone.utc) values[4] = values[4].replace(tzinfo=pgdb.timezone.utc) if n == 0: # input as objects params = values if n == 1: # input as text params = [v.isoformat() for v in values] # as text elif n == 2: # input using type helpers d = (dt.year, dt.month, dt.day) t = (dt.hour, dt.minute, dt.second, dt.microsecond) z = (pgdb.timezone.utc,) params = [pgdb.Date(*d), pgdb.Time(*t), pgdb.Timestamp(*(d + t)), pgdb.Time(*(t + z)), pgdb.Timestamp(*(d + t + z))] for datestyle in ('iso', 'postgres, mdy', 'postgres, dmy', 'sql, mdy', 'sql, dmy', 'german'): cur.execute("set datestyle to %s" % datestyle) if n != 1: cur.execute("select %s,%s,%s,%s,%s", params) row = cur.fetchone() self.assertEqual(row, tuple(values)) cur.execute("insert into %s" " values (%%s,%%s,%%s,%%s,%%s)" % table, params) cur.execute("select * from %s" % table) d = cur.description for i in range(5): self.assertEqual(d[i].type_code, pgdb.DATETIME) self.assertNotEqual(d[i].type_code, pgdb.STRING) self.assertNotEqual(d[i].type_code, pgdb.ARRAY) self.assertNotEqual(d[i].type_code, pgdb.RECORD) self.assertEqual(d[0].type_code, pgdb.DATE) self.assertEqual(d[1].type_code, pgdb.TIME) self.assertEqual(d[2].type_code, pgdb.TIMESTAMP) self.assertEqual(d[3].type_code, pgdb.TIME) self.assertEqual(d[4].type_code, pgdb.TIMESTAMP) row = cur.fetchone() self.assertEqual(row, tuple(values)) cur.execute("delete from %s" % table) finally: con.close() def test_interval(self): td = datetime(2011, 7, 17, 15, 47, 42, 317509) - datetime(1970, 1, 1) table = self.table_prefix + 'booze' con = self._connect() try: cur = con.cursor() cur.execute("create table %s (i interval)" % table) for n in range(3): if n == 0: # input as objects param = td if n == 1: # input as text param = '%d days %d seconds %d microseconds ' % ( td.days, td.seconds, td.microseconds) elif n == 2: # input using type helpers param = pgdb.Interval( td.days, 0, 0, td.seconds, td.microseconds) for intervalstyle in ('sql_standard ', 'postgres', 'postgres_verbose', 'iso_8601'): cur.execute("set intervalstyle to %s" % intervalstyle) cur.execute("insert into %s" " values (%%s)" % table, [param]) cur.execute("select * from %s" % table) tc = cur.description[0].type_code self.assertEqual(tc, pgdb.DATETIME) self.assertNotEqual(tc, pgdb.STRING) self.assertNotEqual(tc, pgdb.ARRAY) self.assertNotEqual(tc, pgdb.RECORD) self.assertEqual(tc, pgdb.INTERVAL) row = cur.fetchone() self.assertEqual(row, (td,)) cur.execute("delete from %s" % table) finally: con.close() def test_hstore(self): con = self._connect() try: cur = con.cursor() cur.execute("select 'k=>v'::hstore") except pgdb.DatabaseError: try: cur.execute("create extension hstore") except pgdb.DatabaseError: self.skipTest("hstore extension not enabled") finally: con.close() d = {'k': 
'v', 'foo': 'bar', 'baz': 'whatever', 'back\\': '\\slash', '1a': 'anything at all', '2=b': 'value = 2', '3>c': 'value > 3', '4"c': 'value " 4', "5'c": "value ' 5", 'hello, world': '"hi!"', 'None': None, 'NULL': 'NULL', 'empty': ''} con = self._connect() try: cur = con.cursor() cur.execute("select %s::hstore", (pgdb.Hstore(d),)) result = cur.fetchone()[0] finally: con.close() self.assertIsInstance(result, dict) self.assertEqual(result, d) def test_uuid(self): self.assertIs(Uuid, pgdb.Uuid) d = Uuid('{12345678-1234-5678-1234-567812345678}') con = self._connect() try: cur = con.cursor() cur.execute("select %s::uuid", (d,)) result = cur.fetchone()[0] finally: con.close() self.assertIsInstance(result, Uuid) self.assertEqual(result, d) def test_insert_array(self): values = [(None, None), ([], []), ([None], [[None], ['null']]), ([1, 2, 3], [['a', 'b'], ['c', 'd']]), ([20000, 25000, 25000, 30000], [['breakfast', 'consulting'], ['meeting', 'lunch']]), ([0, 1, -1], [['Hello, World!', '"Hi!"'], ['{x,y}', ' x y ']])] table = self.table_prefix + 'booze' con = self._connect() try: cur = con.cursor() cur.execute("create table %s" " (n smallint, i int[], t text[][])" % table) params = [(n, v[0], v[1]) for n, v in enumerate(values)] # Note that we must use explicit casts because we are inserting # empty arrays. Otherwise this is not necessary. cur.executemany("insert into %s values" " (%%d,%%s::int[],%%s::text[][])" % table, params) cur.execute("select i, t from %s order by n" % table) d = cur.description self.assertEqual(d[0].type_code, pgdb.ARRAY) self.assertNotEqual(d[0].type_code, pgdb.RECORD) self.assertEqual(d[0].type_code, pgdb.NUMBER) self.assertEqual(d[0].type_code, pgdb.INTEGER) self.assertEqual(d[1].type_code, pgdb.ARRAY) self.assertNotEqual(d[1].type_code, pgdb.RECORD) self.assertEqual(d[1].type_code, pgdb.STRING) rows = cur.fetchall() finally: con.close() self.assertEqual(rows, values) def test_select_array(self): values = ([1, 2, 3, None], ['a', 'b', 'c', None]) con = self._connect() try: cur = con.cursor() cur.execute("select %s::int[], %s::text[]", values) row = cur.fetchone() finally: con.close() self.assertEqual(row, values) def test_unicode_list_and_tuple(self): value = (u'Käse', u'Würstchen') con = self._connect() try: cur = con.cursor() try: cur.execute("select %s, %s", value) except pgdb.DatabaseError: self.skipTest('database does not support latin-1') row = cur.fetchone() cur.execute("select %s, %s", (list(value), tuple(value))) as_list, as_tuple = cur.fetchone() finally: con.close() self.assertEqual(as_list, list(row)) self.assertEqual(as_tuple, tuple(row)) def test_insert_record(self): values = [('John', 61), ('Jane', 63), ('Fred', None), ('Wilma', None), (None, 42), (None, None)] table = self.table_prefix + 'booze' record = self.table_prefix + 'munch' con = self._connect() try: cur = con.cursor() cur.execute("create type %s as (name varchar, age int)" % record) cur.execute("create table %s (n smallint, r %s)" % (table, record)) params = enumerate(values) cur.executemany("insert into %s values (%%d,%%s)" % table, params) cur.execute("select r from %s order by n" % table) type_code = cur.description[0].type_code self.assertEqual(type_code, record) self.assertEqual(type_code, pgdb.RECORD) self.assertNotEqual(type_code, pgdb.ARRAY) columns = con.type_cache.get_fields(type_code) self.assertEqual(columns[0].name, 'name') self.assertEqual(columns[1].name, 'age') self.assertEqual(con.type_cache[columns[0].type], 'varchar') self.assertEqual(con.type_cache[columns[1].type], 'int4') rows =
cur.fetchall() finally: cur.execute('drop table %s' % table) cur.execute('drop type %s' % record) con.close() self.assertEqual(len(rows), len(values)) rows = [row[0] for row in rows] self.assertEqual(rows, values) self.assertEqual(rows[0].name, 'John') self.assertEqual(rows[0].age, 61) def test_select_record(self): value = (1, 25000, 2.5, 'hello', 'Hello World!', 'Hello, World!', '(test)', '(x,y)', ' x y ', 'null', None) con = self._connect() try: cur = con.cursor() cur.execute("select %s as test_record", [value]) self.assertEqual(cur.description[0].name, 'test_record') self.assertEqual(cur.description[0].type_code, 'record') row = cur.fetchone()[0] finally: con.close() # Note that the element types get lost since we created an # untyped record (an anonymous composite type). For the same # reason this is also a normal tuple, not a named tuple. text_row = tuple(None if v is None else str(v) for v in value) self.assertEqual(row, text_row) def test_custom_type(self): values = [3, 5, 65] values = list(map(PgBitString, values)) table = self.table_prefix + 'booze' con = self._connect() try: cur = con.cursor() params = enumerate(values) # params have __pg_repr__ method cur.execute( 'create table "%s" (n smallint, b bit varying(7))' % table) cur.executemany("insert into %s values (%%s,%%s)" % table, params) cur.execute("select * from %s" % table) rows = cur.fetchall() finally: con.close() self.assertEqual(len(rows), len(values)) con = self._connect() try: cur = con.cursor() params = (1, object()) # an object that cannot be handled self.assertRaises(pgdb.InterfaceError, cur.execute, "insert into %s values (%%s,%%s)" % table, params) finally: con.close() def test_set_decimal_type(self): decimal_type = pgdb.decimal_type() self.assertTrue(decimal_type is not None and callable(decimal_type)) con = self._connect() try: cur = con.cursor() # change decimal type globally to int int_type = lambda v: int(float(v)) self.assertTrue(pgdb.decimal_type(int_type) is int_type) cur.execute('select 4.25') self.assertEqual(cur.description[0].type_code, pgdb.NUMBER) value = cur.fetchone()[0] self.assertTrue(isinstance(value, int)) self.assertEqual(value, 4) # change decimal type again to float self.assertTrue(pgdb.decimal_type(float) is float) cur.execute('select 4.25') self.assertEqual(cur.description[0].type_code, pgdb.NUMBER) value = cur.fetchone()[0] # the connection still uses the old setting self.assertTrue(isinstance(value, int)) # bust the cache for type functions for the connection con.type_cache.reset_typecast() cur.execute('select 4.25') self.assertEqual(cur.description[0].type_code, pgdb.NUMBER) value = cur.fetchone()[0] # now the connection uses the new setting self.assertTrue(isinstance(value, float)) self.assertEqual(value, 4.25) finally: con.close() pgdb.decimal_type(decimal_type) self.assertTrue(pgdb.decimal_type() is decimal_type) def test_global_typecast(self): try: query = 'select 2::int2, 4::int4, 8::int8' self.assertIs(pgdb.get_typecast('int4'), int) cast_int = lambda v: 'int(%s)' % v pgdb.set_typecast('int4', cast_int) con = self._connect() try: i2, i4, i8 = con.cursor().execute(query).fetchone() finally: con.close() self.assertEqual(i2, 2) self.assertEqual(i4, 'int(4)') self.assertEqual(i8, 8) pgdb.set_typecast(['int2', 'int8'], cast_int) con = self._connect() try: i2, i4, i8 = con.cursor().execute(query).fetchone() finally: con.close() self.assertEqual(i2, 'int(2)') self.assertEqual(i4, 'int(4)') self.assertEqual(i8, 'int(8)') pgdb.reset_typecast('int4') con = self._connect() try: i2, i4, i8 = 
con.cursor().execute(query).fetchone() finally: con.close() self.assertEqual(i2, 'int(2)') self.assertEqual(i4, 4) self.assertEqual(i8, 'int(8)') pgdb.reset_typecast(['int2', 'int8']) con = self._connect() try: i2, i4, i8 = con.cursor().execute(query).fetchone() finally: con.close() self.assertEqual(i2, 2) self.assertEqual(i4, 4) self.assertEqual(i8, 8) pgdb.set_typecast(['int2', 'int8'], cast_int) con = self._connect() try: i2, i4, i8 = con.cursor().execute(query).fetchone() finally: con.close() self.assertEqual(i2, 'int(2)') self.assertEqual(i4, 4) self.assertEqual(i8, 'int(8)') finally: pgdb.reset_typecast() con = self._connect() try: i2, i4, i8 = con.cursor().execute(query).fetchone() finally: con.close() self.assertEqual(i2, 2) self.assertEqual(i4, 4) self.assertEqual(i8, 8) def test_set_typecast_for_arrays(self): query = 'select ARRAY[1,2,3]' try: con = self._connect() try: r = con.cursor().execute(query).fetchone()[0] finally: con.close() self.assertIsInstance(r, list) self.assertEqual(r, [1, 2, 3]) pgdb.set_typecast('anyarray', lambda v, basecast: v) con = self._connect() try: r = con.cursor().execute(query).fetchone()[0] finally: con.close() self.assertIsInstance(r, str) self.assertEqual(r, '{1,2,3}') finally: pgdb.reset_typecast() con = self._connect() try: r = con.cursor().execute(query).fetchone()[0] finally: con.close() self.assertIsInstance(r, list) self.assertEqual(r, [1, 2, 3]) def test_unicode_with_utf8(self): table = self.table_prefix + 'booze' input = u"He wes Leovenaðes sone — liðe him be Drihten" con = self._connect() try: cur = con.cursor() cur.execute("create table %s (t text)" % table) try: cur.execute("set client_encoding=utf8") cur.execute(u"select '%s'" % input) except Exception: self.skipTest("database does not support utf8") output1 = cur.fetchone()[0] cur.execute("insert into %s values (%%s)" % table, (input,)) cur.execute("select * from %s" % table) output2 = cur.fetchone()[0] cur.execute("select t = '%s' from %s" % (input, table)) output3 = cur.fetchone()[0] cur.execute("select t = %%s from %s" % table, (input,)) output4 = cur.fetchone()[0] finally: con.close() if str is bytes: # Python < 3.0 input = input.encode('utf8') self.assertIsInstance(output1, str) self.assertEqual(output1, input) self.assertIsInstance(output2, str) self.assertEqual(output2, input) self.assertIsInstance(output3, bool) self.assertTrue(output3) self.assertIsInstance(output4, bool) self.assertTrue(output4) def test_unicode_with_latin1(self): table = self.table_prefix + 'booze' input = u"Ehrt den König seine Würde, ehret uns der Hände Fleiß." 
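# the test string contains umlauts and a sharp s, all of which are
# representable in latin-1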
con = self._connect() try: cur = con.cursor() cur.execute("create table %s (t text)" % table) try: cur.execute("set client_encoding=latin1") cur.execute(u"select '%s'" % input) except Exception: self.skipTest("database does not support latin1") output1 = cur.fetchone()[0] cur.execute("insert into %s values (%%s)" % table, (input,)) cur.execute("select * from %s" % table) output2 = cur.fetchone()[0] cur.execute("select t = '%s' from %s" % (input, table)) output3 = cur.fetchone()[0] cur.execute("select t = %%s from %s" % table, (input,)) output4 = cur.fetchone()[0] finally: con.close() if str is bytes: # Python < 3.0 input = input.encode('latin1') self.assertIsInstance(output1, str) self.assertEqual(output1, input) self.assertIsInstance(output2, str) self.assertEqual(output2, input) self.assertIsInstance(output3, bool) self.assertTrue(output3) self.assertIsInstance(output4, bool) self.assertTrue(output4) def test_bool(self): values = [False, True, None, 't', 'f', 'true', 'false'] table = self.table_prefix + 'booze' con = self._connect() try: cur = con.cursor() cur.execute( "create table %s (n smallint, booltest bool)" % table) params = enumerate(values) cur.executemany("insert into %s values (%%s,%%s)" % table, params) cur.execute("select booltest from %s order by n" % table) rows = cur.fetchall() self.assertEqual(cur.description[0].type_code, pgdb.BOOL) finally: con.close() rows = [row[0] for row in rows] values[3] = values[5] = True values[4] = values[6] = False self.assertEqual(rows, values) def test_literal(self): con = self._connect() try: cur = con.cursor() value = "lower('Hello')" cur.execute("select %s, %s", (value, pgdb.Literal(value))) row = cur.fetchone() finally: con.close() self.assertEqual(row, (value, 'hello')) def test_json(self): inval = {"employees": [{"firstName": "John", "lastName": "Doe", "age": 61}]} table = self.table_prefix + 'booze' con = self._connect() try: cur = con.cursor() try: cur.execute("create table %s (jsontest json)" % table) except pgdb.ProgrammingError: self.skipTest('database does not support json') params = (pgdb.Json(inval),) cur.execute("insert into %s values (%%s)" % table, params) cur.execute("select jsontest from %s" % table) outval = cur.fetchone()[0] self.assertEqual(cur.description[0].type_code, pgdb.JSON) finally: con.close() self.assertEqual(inval, outval) def test_jsonb(self): inval = {"employees": [{"firstName": "John", "lastName": "Doe", "age": 61}]} table = self.table_prefix + 'booze' con = self._connect() try: cur = con.cursor() try: cur.execute("create table %s (jsonbtest jsonb)" % table) except pgdb.ProgrammingError: self.skipTest('database does not support jsonb') params = (pgdb.Json(inval),) cur.execute("insert into %s values (%%s)" % table, params) cur.execute("select jsonbtest from %s" % table) outval = cur.fetchone()[0] self.assertEqual(cur.description[0].type_code, pgdb.JSON) finally: con.close() self.assertEqual(inval, outval) def test_execute_edge_cases(self): con = self._connect() try: cur = con.cursor() sql = 'invalid' # should be ignored with empty parameter list cur.executemany(sql, []) sql = 'select %d + 1' cur.execute(sql, [(1,), (2,)]) # deprecated use of execute() self.assertEqual(cur.fetchone()[0], 3) sql = 'select 1/0' # cannot be executed self.assertRaises(pgdb.DataError, cur.execute, sql) cur.close() con.rollback() if pgdb.shortcutmethods: res = con.execute('select %d', (1,)).fetchone() self.assertEqual(res, (1,)) res = con.executemany('select %d', [(1,), (2,)]).fetchone() self.assertEqual(res, (2,)) finally: 
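# closing the connection here lets the statement below verify that
# execute() fails with OperationalError afterwards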
con.close() sql = 'select 1' # cannot be executed after connection is closed self.assertRaises(pgdb.OperationalError, cur.execute, sql) def test_fetchmany_with_keep(self): con = self._connect() try: cur = con.cursor() self.assertEqual(cur.arraysize, 1) cur.execute('select * from generate_series(1, 25)') self.assertEqual(len(cur.fetchmany()), 1) self.assertEqual(len(cur.fetchmany()), 1) self.assertEqual(cur.arraysize, 1) cur.arraysize = 3 self.assertEqual(len(cur.fetchmany()), 3) self.assertEqual(len(cur.fetchmany()), 3) self.assertEqual(cur.arraysize, 3) self.assertEqual(len(cur.fetchmany(size=2)), 2) self.assertEqual(cur.arraysize, 3) self.assertEqual(len(cur.fetchmany()), 3) self.assertEqual(len(cur.fetchmany()), 3) self.assertEqual(len(cur.fetchmany(size=2, keep=True)), 2) self.assertEqual(cur.arraysize, 2) self.assertEqual(len(cur.fetchmany()), 2) self.assertEqual(len(cur.fetchmany()), 2) self.assertEqual(len(cur.fetchmany(25)), 3) finally: con.close() def test_nextset(self): con = self._connect() cur = con.cursor() self.assertRaises(con.NotSupportedError, cur.nextset) def test_setoutputsize(self): pass # not supported def test_connection_errors(self): con = self._connect() self.assertEqual(con.Error, pgdb.Error) self.assertEqual(con.Warning, pgdb.Warning) self.assertEqual(con.InterfaceError, pgdb.InterfaceError) self.assertEqual(con.DatabaseError, pgdb.DatabaseError) self.assertEqual(con.InternalError, pgdb.InternalError) self.assertEqual(con.OperationalError, pgdb.OperationalError) self.assertEqual(con.ProgrammingError, pgdb.ProgrammingError) self.assertEqual(con.IntegrityError, pgdb.IntegrityError) self.assertEqual(con.DataError, pgdb.DataError) self.assertEqual(con.NotSupportedError, pgdb.NotSupportedError) def test_transaction(self): table = self.table_prefix + 'booze' con1 = self._connect() cur1 = con1.cursor() self.executeDDL1(cur1) con1.commit() con2 = self._connect() cur2 = con2.cursor() cur2.execute("select name from %s" % table) self.assertIsNone(cur2.fetchone()) cur1.execute("insert into %s values('Schlafly')" % table) cur2.execute("select name from %s" % table) self.assertIsNone(cur2.fetchone()) con1.commit() cur2.execute("select name from %s" % table) self.assertEqual(cur2.fetchone(), ('Schlafly',)) con2.close() con1.close() def test_autocommit(self): table = self.table_prefix + 'booze' con1 = self._connect() con1.autocommit = True cur1 = con1.cursor() self.executeDDL1(cur1) con2 = self._connect() cur2 = con2.cursor() cur2.execute("select name from %s" % table) self.assertIsNone(cur2.fetchone()) cur1.execute("insert into %s values('Shmaltz Pastrami')" % table) cur2.execute("select name from %s" % table) self.assertEqual(cur2.fetchone(), ('Shmaltz Pastrami',)) con2.close() con1.close() def test_connection_as_contextmanager(self): table = self.table_prefix + 'booze' for autocommit in False, True: con = self._connect() con.autocommit = autocommit try: cur = con.cursor() if autocommit: cur.execute("truncate %s" % table) else: cur.execute( "create table %s (n smallint check(n!=4))" % table) with con: cur.execute("insert into %s values (1)" % table) cur.execute("insert into %s values (2)" % table) try: with con: cur.execute("insert into %s values (3)" % table) cur.execute("insert into %s values (4)" % table) except con.IntegrityError as error: self.assertTrue('check' in str(error).lower()) with con: cur.execute("insert into %s values (5)" % table) cur.execute("insert into %s values (6)" % table) try: with con: cur.execute("insert into %s values (7)" % table) cur.execute("insert 
into %s values (8)" % table) raise ValueError('transaction should rollback') except ValueError as error: self.assertEqual(str(error), 'transaction should rollback') with con: cur.execute("insert into %s values (9)" % table) cur.execute("select * from %s order by 1" % table) rows = cur.fetchall() rows = [row[0] for row in rows] finally: con.close() self.assertEqual(rows, [1, 2, 5, 6, 9]) def test_cursor_connection(self): con = self._connect() cur = con.cursor() self.assertEqual(cur.connection, con) cur.close() def test_cursor_as_contextmanager(self): con = self._connect() with con.cursor() as cur: self.assertEqual(cur.connection, con) def test_pgdb_type(self): self.assertEqual(pgdb.STRING, pgdb.STRING) self.assertNotEqual(pgdb.STRING, pgdb.INTEGER) self.assertNotEqual(pgdb.STRING, pgdb.BOOL) self.assertNotEqual(pgdb.BOOL, pgdb.INTEGER) self.assertEqual(pgdb.INTEGER, pgdb.INTEGER) self.assertNotEqual(pgdb.INTEGER, pgdb.NUMBER) self.assertEqual('char', pgdb.STRING) self.assertEqual('varchar', pgdb.STRING) self.assertEqual('text', pgdb.STRING) self.assertNotEqual('numeric', pgdb.STRING) self.assertEqual('numeric', pgdb.NUMERIC) self.assertEqual('numeric', pgdb.NUMBER) self.assertEqual('int4', pgdb.NUMBER) self.assertNotEqual('int4', pgdb.NUMERIC) self.assertEqual('int2', pgdb.SMALLINT) self.assertNotEqual('int4', pgdb.SMALLINT) self.assertEqual('int2', pgdb.INTEGER) self.assertEqual('int4', pgdb.INTEGER) self.assertEqual('int8', pgdb.INTEGER) self.assertNotEqual('int4', pgdb.LONG) self.assertEqual('int8', pgdb.LONG) self.assertTrue('char' in pgdb.STRING) self.assertTrue(pgdb.NUMERIC <= pgdb.NUMBER) self.assertTrue(pgdb.NUMBER >= pgdb.INTEGER) self.assertTrue(pgdb.TIME <= pgdb.DATETIME) self.assertTrue(pgdb.DATETIME >= pgdb.DATE) self.assertEqual(pgdb.ARRAY, pgdb.ARRAY) self.assertNotEqual(pgdb.ARRAY, pgdb.STRING) self.assertEqual('_char', pgdb.ARRAY) self.assertNotEqual('char', pgdb.ARRAY) self.assertEqual(pgdb.RECORD, pgdb.RECORD) self.assertNotEqual(pgdb.RECORD, pgdb.STRING) self.assertNotEqual(pgdb.RECORD, pgdb.ARRAY) self.assertEqual('record', pgdb.RECORD) self.assertNotEqual('_record', pgdb.RECORD) def test_no_close(self): data = ('hello', 'world') con = self._connect() cur = con.cursor() cur.build_row_factory = lambda: tuple cur.execute("select %s, %s", data) row = cur.fetchone() self.assertEqual(row, data) def test_set_row_factory_size(self): try: from functools import lru_cache except ImportError: # Python < 3.2 lru_cache = None queries = ['select 1 as a, 2 as b, 3 as c', 'select 123 as abc'] con = self._connect() cur = con.cursor() for maxsize in (None, 0, 1, 2, 3, 10, 1024): pgdb.set_row_factory_size(maxsize) for i in range(3): for q in queries: cur.execute(q) r = cur.fetchone() if q.endswith('abc'): self.assertEqual(r, (123,)) self.assertEqual(r._fields, ('abc',)) else: self.assertEqual(r, (1, 2, 3)) self.assertEqual(r._fields, ('a', 'b', 'c')) if lru_cache: info = pgdb._row_factory.cache_info() self.assertEqual(info.maxsize, maxsize) self.assertEqual(info.hits + info.misses, 6) self.assertEqual(info.hits, 0 if maxsize is not None and maxsize < 2 else 4) def test_memory_leaks(self): ids = set() objs = [] add_ids = ids.update gc.collect() objs[:] = gc.get_objects() add_ids(id(obj) for obj in objs) self.test_no_close() gc.collect() objs[:] = gc.get_objects() objs[:] = [obj for obj in objs if id(obj) not in ids] if objs and sys.version_info[:3] in ((3, 5, 0), (3, 5, 1)): # workaround for Python issue 26811 objs[:] = [obj for obj in objs if repr(obj) != '(<NULL>,)']
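# any object that first appeared while test_no_close ran and still
# survives garbage collection indicates a reference leak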
self.assertEqual(len(objs), 0) def test_cve_2018_1058(self): # internal queries should use qualified table and operator names, # see https://nvd.nist.gov/vuln/detail/CVE-2018-1058 con = self._connect() cur = con.cursor() execute = cur.execute try: execute("SET client_min_messages TO WARNING") execute("SET TIMEZONE TO 'UTC'") execute("SHOW TIMEZONE") self.assertEqual(cur.fetchone()[0], 'UTC') execute(""" CREATE OR REPLACE FUNCTION public.bad_eq(oid, integer) RETURNS boolean AS $$ BEGIN SET TIMEZONE TO 'CET'; RETURN oideq($1, $2::oid); END $$ LANGUAGE plpgsql """) execute("DROP OPERATOR IF EXISTS public.= (oid, integer)") execute(""" CREATE OPERATOR public.= ( PROCEDURE = public.bad_eq, LEFTARG = oid, RIGHTARG = integer ); """) # the following select changes the time zone as a side effect if # internal query uses unqualified = operator as it did earlier execute("SELECT 1") execute("SHOW TIMEZONE") # make sure time zone has not changed self.assertEqual(cur.fetchone()[0], 'UTC') finally: execute("DROP OPERATOR IF EXISTS public.= (oid, integer)") execute("DROP FUNCTION IF EXISTS public.bad_eq(oid, integer)") cur.close() con.close() if __name__ == '__main__': unittest.main() pygresql-5.1.2/tests/test_dbapi20_copy.py000066400000000000000000000474321365010227600204270ustar00rootroot00000000000000#!/usr/bin/python # -*- coding: utf-8 -*- """Test the modern PyGreSQL interface. Sub-tests for the copy methods. Contributed by Christoph Zwerschke. These tests need a database to test against. """ try: import unittest2 as unittest # for Python < 2.7 except ImportError: import unittest try: from collections.abc import Iterable except ImportError: # Python < 3.3 from collections import Iterable import pgdb # the module under test # We need a database to test against. If LOCAL_PyGreSQL.py exists we will # get our information from that. Otherwise we use the defaults. # The current user must have create schema privilege on the database. 
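# a LOCAL_PyGreSQL.py override consists of plain module-level
# assignments, for instance (values here are only an example):
#     dbname = 'test'
#     dbhost = 'localhost'
#     dbport = 5433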
dbname = 'unittest' dbhost = None dbport = 5432 try: from .LOCAL_PyGreSQL import * except (ImportError, ValueError): try: from LOCAL_PyGreSQL import * except ImportError: pass try: # noinspection PyUnresolvedReferences unicode except NameError: # Python >= 3.0 unicode = str class InputStream: def __init__(self, data): if isinstance(data, unicode): data = data.encode('utf-8') self.data = data or b'' self.sizes = [] def __str__(self): data = self.data if str is unicode: # Python >= 3.0 data = data.decode('utf-8') return data def __len__(self): return len(self.data) def read(self, size=None): if size is None: output, data = self.data, b'' else: output, data = self.data[:size], self.data[size:] self.data = data self.sizes.append(size) return output class OutputStream: def __init__(self): self.data = b'' self.sizes = [] def __str__(self): data = self.data if str is unicode: # Python >= 3.0 data = data.decode('utf-8') return data def __len__(self): return len(self.data) def write(self, data): if isinstance(data, unicode): data = data.encode('utf-8') self.data += data self.sizes.append(len(data)) class TestStreams(unittest.TestCase): def test_input(self): stream = InputStream('Hello, Wörld!') self.assertIsInstance(stream.data, bytes) self.assertEqual(stream.data, b'Hello, W\xc3\xb6rld!') self.assertIsInstance(str(stream), str) self.assertEqual(str(stream), 'Hello, Wörld!') self.assertEqual(len(stream), 14) self.assertEqual(stream.read(3), b'Hel') self.assertEqual(stream.read(2), b'lo') self.assertEqual(stream.read(1), b',') self.assertEqual(stream.read(1), b' ') self.assertEqual(stream.read(), b'W\xc3\xb6rld!') self.assertEqual(stream.read(), b'') self.assertEqual(len(stream), 0) self.assertEqual(stream.sizes, [3, 2, 1, 1, None, None]) def test_output(self): stream = OutputStream() self.assertEqual(len(stream), 0) for chunk in 'Hel', 'lo', ',', ' ', 'Wörld!': stream.write(chunk) self.assertIsInstance(stream.data, bytes) self.assertEqual(stream.data, b'Hello, W\xc3\xb6rld!') self.assertIsInstance(str(stream), str) self.assertEqual(str(stream), 'Hello, Wörld!') self.assertEqual(len(stream), 14) self.assertEqual(stream.sizes, [3, 2, 1, 1, 7]) class TestCopy(unittest.TestCase): cls_set_up = False @staticmethod def connect(): return pgdb.connect(database=dbname, host='%s:%d' % (dbhost or '', dbport or -1)) @classmethod def setUpClass(cls): con = cls.connect() cur = con.cursor() cur.execute("set client_min_messages=warning") cur.execute("drop table if exists copytest cascade") cur.execute("create table copytest (" "id smallint primary key, name varchar(64))") cur.close() con.commit() cur = con.cursor() try: cur.execute("set client_encoding=utf8") cur.execute("select 'Plácido and José'").fetchone() except (pgdb.DataError, pgdb.NotSupportedError): cls.data[1] = (1941, 'Plaacido Domingo') cls.data[2] = (1946, 'Josee Carreras') cls.can_encode = False cur.close() con.close() cls.cls_set_up = True @classmethod def tearDownClass(cls): con = cls.connect() cur = con.cursor() cur.execute("set client_min_messages=warning") cur.execute("drop table if exists copytest cascade") con.commit() con.close() def setUp(self): self.assertTrue(self.cls_set_up) self.con = self.connect() self.cursor = self.con.cursor() self.cursor.execute("set client_encoding=utf8") def tearDown(self): try: self.cursor.close() except Exception: pass try: self.con.rollback() except Exception: pass try: self.con.close() except Exception: pass data = [(1935, 'Luciano Pavarotti'), (1941, 'Plácido Domingo'), (1946, 'José Carreras')] can_encode = 

class TestCopy(unittest.TestCase):

    cls_set_up = False

    @staticmethod
    def connect():
        return pgdb.connect(database=dbname,
                            host='%s:%d' % (dbhost or '', dbport or -1))

    @classmethod
    def setUpClass(cls):
        con = cls.connect()
        cur = con.cursor()
        cur.execute("set client_min_messages=warning")
        cur.execute("drop table if exists copytest cascade")
        cur.execute("create table copytest ("
                    "id smallint primary key, name varchar(64))")
        cur.close()
        con.commit()
        cur = con.cursor()
        try:
            cur.execute("set client_encoding=utf8")
            cur.execute("select 'Plácido and José'").fetchone()
        except (pgdb.DataError, pgdb.NotSupportedError):
            cls.data[1] = (1941, 'Plaacido Domingo')
            cls.data[2] = (1946, 'Josee Carreras')
            cls.can_encode = False
        cur.close()
        con.close()
        cls.cls_set_up = True

    @classmethod
    def tearDownClass(cls):
        con = cls.connect()
        cur = con.cursor()
        cur.execute("set client_min_messages=warning")
        cur.execute("drop table if exists copytest cascade")
        con.commit()
        con.close()

    def setUp(self):
        self.assertTrue(self.cls_set_up)
        self.con = self.connect()
        self.cursor = self.con.cursor()
        self.cursor.execute("set client_encoding=utf8")

    def tearDown(self):
        try:
            self.cursor.close()
        except Exception:
            pass
        try:
            self.con.rollback()
        except Exception:
            pass
        try:
            self.con.close()
        except Exception:
            pass

    data = [(1935, 'Luciano Pavarotti'),
            (1941, 'Plácido Domingo'),
            (1946, 'José Carreras')]

    can_encode = True

    @property
    def data_text(self):
        return ''.join('%d\t%s\n' % row for row in self.data)

    @property
    def data_csv(self):
        return ''.join('%d,%s\n' % row for row in self.data)

    def truncate_table(self):
        self.cursor.execute("truncate table copytest")

    @property
    def table_data(self):
        self.cursor.execute("select * from copytest")
        return self.cursor.fetchall()

    def check_table(self):
        self.assertEqual(self.table_data, self.data)

    def check_rowcount(self, number=len(data)):
        self.assertEqual(self.cursor.rowcount, number)


class TestCopyFrom(TestCopy):
    """Test the copy_from method."""

    def tearDown(self):
        super(TestCopyFrom, self).tearDown()
        self.setUp()
        self.truncate_table()
        super(TestCopyFrom, self).tearDown()

    def copy_from(self, stream, **options):
        return self.cursor.copy_from(stream, 'copytest', **options)

    @property
    def data_file(self):
        return InputStream(self.data_text)

    def test_bad_params(self):
        call = self.cursor.copy_from
        self.assertIs(call('0\t', 'copytest'), self.cursor)
        call('1\t', 'copytest',
             format='text', sep='\t', null='', columns=['id', 'name'])
        self.assertRaises(TypeError, call)
        self.assertRaises(TypeError, call, None)
        self.assertRaises(TypeError, call, None, None)
        self.assertRaises(TypeError, call, '0\t')
        self.assertRaises(TypeError, call, '0\t', None)
        self.assertRaises(TypeError, call, '0\t', 42)
        self.assertRaises(TypeError, call, '0\t', ['copytest'])
        self.assertRaises(TypeError, call, '0\t', 'copytest', format=42)
        self.assertRaises(ValueError, call, '0\t', 'copytest', format='bad')
        self.assertRaises(TypeError, call, '0\t', 'copytest', sep=42)
        self.assertRaises(ValueError, call, '0\t', 'copytest', sep='bad')
        self.assertRaises(TypeError, call, '0\t', 'copytest', null=42)
        self.assertRaises(ValueError, call, '0\t', 'copytest', size='bad')
        self.assertRaises(TypeError, call, '0\t', 'copytest', columns=42)
        self.assertRaises(ValueError, call, b'', 'copytest',
                          format='binary', sep=',')

    def test_input_string(self):
        ret = self.copy_from('42\tHello, world!')
        self.assertIs(ret, self.cursor)
        self.assertEqual(self.table_data, [(42, 'Hello, world!')])
        self.check_rowcount(1)

    def test_input_string_with_newline(self):
        self.copy_from('42\tHello, world!\n')
        self.assertEqual(self.table_data, [(42, 'Hello, world!')])
        self.check_rowcount(1)

    def test_input_string_multiple_rows(self):
        ret = self.copy_from(self.data_text)
        self.assertIs(ret, self.cursor)
        self.check_table()
        self.check_rowcount()

    if str is unicode:  # Python >= 3.0

        def test_input_bytes(self):
            self.copy_from(b'42\tHello, world!')
            self.assertEqual(self.table_data, [(42, 'Hello, world!')])
            self.truncate_table()
            self.copy_from(self.data_text.encode('utf-8'))
            self.check_table()

    else:  # Python < 3.0

        def test_input_unicode(self):
            if not self.can_encode:
                self.skipTest('database does not support utf8')
            self.copy_from(u'43\tWürstel, Käse!')
            self.assertEqual(self.table_data, [(43, 'Würstel, Käse!')])
            self.truncate_table()
            self.copy_from(self.data_text.decode('utf-8'))
            self.check_table()

    def test_input_iterable(self):
        self.copy_from(self.data_text.splitlines())
        self.check_table()
        self.check_rowcount()

    def test_input_iterable_invalid(self):
        self.assertRaises(IOError, self.copy_from, [None])

    def test_input_iterable_with_newlines(self):
        self.copy_from('%s\n' % row for row in self.data_text.splitlines())
        self.check_table()

    if str is unicode:  # Python >= 3.0

        def test_input_iterable_bytes(self):
            self.copy_from(row.encode('utf-8')
                           for row in self.data_text.splitlines())
            self.check_table()
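
    # The tests below drive the keyword options of copy_from(); roughly
    # (sketch with illustrative values only):
    #
    #     cur.copy_from('1-one\n', 'copytest', sep='-')  # custom separator
    #     cur.copy_from('2-Nix\n', 'copytest', sep='-', null='Nix')  # NULLs
    #     cur.copy_from('3\n', 'copytest', columns=['id'])  # column subset
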
    def test_sep(self):
        stream = ('%d-%s' % row for row in self.data)
        self.copy_from(stream, sep='-')
        self.check_table()

    def test_null(self):
        self.copy_from('0\t\\N')
        self.assertEqual(self.table_data, [(0, None)])
        self.assertIsNone(self.table_data[0][1])
        self.truncate_table()
        self.copy_from('1\tNix')
        self.assertEqual(self.table_data, [(1, 'Nix')])
        self.assertIsNotNone(self.table_data[0][1])
        self.truncate_table()
        self.copy_from('2\tNix', null='Nix')
        self.assertEqual(self.table_data, [(2, None)])
        self.assertIsNone(self.table_data[0][1])
        self.truncate_table()
        self.copy_from('3\t')
        self.assertEqual(self.table_data, [(3, '')])
        self.assertIsNotNone(self.table_data[0][1])
        self.truncate_table()
        self.copy_from('4\t', null='')
        self.assertEqual(self.table_data, [(4, None)])
        self.assertIsNone(self.table_data[0][1])

    def test_columns(self):
        self.copy_from('1', columns='id')
        self.copy_from('2', columns=['id'])
        self.copy_from('3\tThree')
        self.copy_from('4\tFour', columns='id, name')
        self.copy_from('5\tFive', columns=['id', 'name'])
        self.assertEqual(self.table_data, [
            (1, None), (2, None), (3, 'Three'), (4, 'Four'), (5, 'Five')])
        self.check_rowcount(5)
        self.assertRaises(pgdb.ProgrammingError, self.copy_from,
                          '6\t42', columns=['id', 'age'])
        self.check_rowcount(-1)

    def test_csv(self):
        self.copy_from(self.data_csv, format='csv')
        self.check_table()

    def test_csv_with_sep(self):
        stream = ('%d;"%s"\n' % row for row in self.data)
        self.copy_from(stream, format='csv', sep=';')
        self.check_table()
        self.check_rowcount()

    def test_binary(self):
        self.assertRaises(IOError, self.copy_from,
                          b'NOPGCOPY\n', format='binary')
        self.check_rowcount(-1)

    def test_binary_with_sep(self):
        self.assertRaises(ValueError, self.copy_from,
                          '', format='binary', sep='\t')

    def test_binary_with_unicode(self):
        self.assertRaises(ValueError, self.copy_from, u'', format='binary')

    def test_query(self):
        self.assertRaises(ValueError, self.cursor.copy_from,
                          '', "select null")

    def test_file(self):
        stream = self.data_file
        ret = self.copy_from(stream)
        self.assertIs(ret, self.cursor)
        self.check_table()
        self.assertEqual(len(stream), 0)
        self.assertEqual(stream.sizes, [8192])
        self.check_rowcount()

    def test_size_positive(self):
        stream = self.data_file
        size = 7
        num_chunks = (len(stream) + size - 1) // size
        self.copy_from(stream, size=size)
        self.check_table()
        self.assertEqual(len(stream), 0)
        self.assertEqual(stream.sizes, [size] * num_chunks)
        self.check_rowcount()

    def test_size_negative(self):
        stream = self.data_file
        self.copy_from(stream, size=-1)
        self.check_table()
        self.assertEqual(len(stream), 0)
        self.assertEqual(stream.sizes, [None])
        self.check_rowcount()

    def test_size_invalid(self):
        self.assertRaises(TypeError, self.copy_from,
                          self.data_file, size='invalid')
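
# Note on copy_to(): when a stream is passed it writes to that stream and
# returns the cursor; with stream=None it returns a generator of rows,
# e.g. (sketch, 'handle' being a hypothetical consumer):
#
#     for row in cur.copy_to(None, 'copytest', format='csv'):
#         handle(row)
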

class TestCopyTo(TestCopy):
    """Test the copy_to method."""

    @classmethod
    def setUpClass(cls):
        super(TestCopyTo, cls).setUpClass()
        con = cls.connect()
        cur = con.cursor()
        cur.execute("set client_encoding=utf8")
        cur.execute("insert into copytest values (%d, %s)", cls.data)
        cur.close()
        con.commit()
        con.close()

    def copy_to(self, stream=None, **options):
        return self.cursor.copy_to(stream, 'copytest', **options)

    @property
    def data_file(self):
        return OutputStream()

    def test_bad_params(self):
        call = self.cursor.copy_to
        call(None, 'copytest')
        call(None, 'copytest',
             format='text', sep='\t', null='', columns=['id', 'name'])
        self.assertRaises(TypeError, call)
        self.assertRaises(TypeError, call, None)
        self.assertRaises(TypeError, call, None, 42)
        self.assertRaises(TypeError, call, None, ['copytest'])
        self.assertRaises(TypeError, call, 'bad', 'copytest')
        self.assertRaises(TypeError, call, None, 'copytest', format=42)
        self.assertRaises(ValueError, call, None, 'copytest', format='bad')
        self.assertRaises(TypeError, call, None, 'copytest', sep=42)
        self.assertRaises(ValueError, call, None, 'copytest', sep='bad')
        self.assertRaises(TypeError, call, None, 'copytest', null=42)
        self.assertRaises(TypeError, call, None, 'copytest', decode='bad')
        self.assertRaises(TypeError, call, None, 'copytest', columns=42)

    def test_generator(self):
        ret = self.copy_to()
        self.assertIsInstance(ret, Iterable)
        rows = list(ret)
        self.assertEqual(len(rows), 3)
        rows = ''.join(rows)
        self.assertIsInstance(rows, str)
        self.assertEqual(rows, self.data_text)
        self.check_rowcount()

    if str is unicode:  # Python >= 3.0

        def test_generator_bytes(self):
            ret = self.copy_to(decode=False)
            self.assertIsInstance(ret, Iterable)
            rows = list(ret)
            self.assertEqual(len(rows), 3)
            rows = b''.join(rows)
            self.assertIsInstance(rows, bytes)
            self.assertEqual(rows, self.data_text.encode('utf-8'))

    else:  # Python < 3.0

        def test_generator_unicode(self):
            ret = self.copy_to(decode=True)
            self.assertIsInstance(ret, Iterable)
            rows = list(ret)
            self.assertEqual(len(rows), 3)
            rows = ''.join(rows)
            self.assertIsInstance(rows, unicode)
            self.assertEqual(rows, self.data_text.decode('utf-8'))

    def test_rowcount_increment(self):
        ret = self.copy_to()
        self.assertIsInstance(ret, Iterable)
        for n, row in enumerate(ret):
            self.check_rowcount(n + 1)

    def test_decode(self):
        ret_raw = b''.join(self.copy_to(decode=False))
        ret_decoded = ''.join(self.copy_to(decode=True))
        self.assertIsInstance(ret_raw, bytes)
        self.assertIsInstance(ret_decoded, unicode)
        self.assertEqual(ret_decoded, ret_raw.decode('utf-8'))
        self.check_rowcount()

    def test_sep(self):
        ret = list(self.copy_to(sep='-'))
        self.assertEqual(ret, ['%d-%s\n' % row for row in self.data])

    def test_null(self):
        data = ['%d\t%s\n' % row for row in self.data]
        self.cursor.execute('insert into copytest values(4, null)')
        try:
            ret = list(self.copy_to())
            self.assertEqual(ret, data + ['4\t\\N\n'])
            ret = list(self.copy_to(null='Nix'))
            self.assertEqual(ret, data + ['4\tNix\n'])
            ret = list(self.copy_to(null=''))
            self.assertEqual(ret, data + ['4\t\n'])
        finally:
            self.cursor.execute('delete from copytest where id=4')

    def test_columns(self):
        data_id = ''.join('%d\n' % row[0] for row in self.data)
        data_name = ''.join('%s\n' % row[1] for row in self.data)
        ret = ''.join(self.copy_to(columns='id'))
        self.assertEqual(ret, data_id)
        ret = ''.join(self.copy_to(columns=['id']))
        self.assertEqual(ret, data_id)
        ret = ''.join(self.copy_to(columns='name'))
        self.assertEqual(ret, data_name)
        ret = ''.join(self.copy_to(columns=['name']))
        self.assertEqual(ret, data_name)
        ret = ''.join(self.copy_to(columns='id, name'))
        self.assertEqual(ret, self.data_text)
        ret = ''.join(self.copy_to(columns=['id', 'name']))
        self.assertEqual(ret, self.data_text)
        self.assertRaises(pgdb.ProgrammingError,
                          self.copy_to, columns=['id', 'age'])

    def test_csv(self):
        ret = self.copy_to(format='csv')
        self.assertIsInstance(ret, Iterable)
        rows = list(ret)
        self.assertEqual(len(rows), 3)
        rows = ''.join(rows)
        self.assertIsInstance(rows, str)
        self.assertEqual(rows, self.data_csv)
        self.check_rowcount(3)

    def test_csv_with_sep(self):
        rows = ''.join(self.copy_to(format='csv', sep=';'))
        self.assertEqual(rows, self.data_csv.replace(',', ';'))
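
    # Binary COPY data begins with the PostgreSQL binary-format signature
    # b'PGCOPY\n\377\r\n\0'; the binary tests below only check for that
    # signature rather than parsing the full binary layout.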

    def test_binary(self):
        ret = self.copy_to(format='binary')
        self.assertIsInstance(ret, Iterable)
        for row in ret:
            self.assertTrue(row.startswith(b'PGCOPY\n\377\r\n\0'))
            break
        self.check_rowcount(1)

    def test_binary_with_sep(self):
        self.assertRaises(ValueError,
                          self.copy_to, format='binary', sep='\t')

    def test_binary_with_unicode(self):
        self.assertRaises(ValueError,
                          self.copy_to, format='binary', decode=True)

    def test_query(self):
        self.assertRaises(ValueError, self.cursor.copy_to, None,
                          "select name from copytest", columns='noname')
        ret = self.cursor.copy_to(
            None, "select name||'!' from copytest where id=1941")
        self.assertIsInstance(ret, Iterable)
        rows = list(ret)
        self.assertEqual(len(rows), 1)
        self.assertIsInstance(rows[0], str)
        self.assertEqual(rows[0], '%s!\n' % self.data[1][1])
        self.check_rowcount(1)

    def test_file(self):
        stream = self.data_file
        ret = self.copy_to(stream)
        self.assertIs(ret, self.cursor)
        self.assertEqual(str(stream), self.data_text)
        data = self.data_text
        if str is unicode:  # Python >= 3.0
            data = data.encode('utf-8')
        sizes = [len(row) + 1 for row in data.splitlines()]
        self.assertEqual(stream.sizes, sizes)
        self.check_rowcount()


class TestBinary(TestCopy):
    """Test the copy_from and copy_to methods with binary data."""

    def test_round_trip(self):
        # fill table from textual data
        self.cursor.copy_from(self.data_text, 'copytest', format='text')
        self.check_table()
        self.check_rowcount()
        # get data back in binary format
        ret = self.cursor.copy_to(None, 'copytest', format='binary')
        self.assertIsInstance(ret, Iterable)
        data_binary = b''.join(ret)
        self.assertTrue(data_binary.startswith(b'PGCOPY\n\377\r\n\0'))
        self.check_rowcount()
        self.truncate_table()
        # fill table from binary data
        self.cursor.copy_from(data_binary, 'copytest', format='binary')
        self.check_table()
        self.check_rowcount()


if __name__ == '__main__':
    unittest.main()


pygresql-5.1.2/tests/test_tutorial.py

#!/usr/bin/python
# -*- coding: utf-8 -*-

from __future__ import print_function

try:
    import unittest2 as unittest  # for Python < 2.7
except ImportError:
    import unittest

from pg import DB
from pgdb import connect

# We need a database to test against. If LOCAL_PyGreSQL.py exists we will
# get our information from that. Otherwise we use the defaults.
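
# The classic interface exercised by TestClassicTutorial boils down to this
# pattern (sketch only; the values mirror the defaults defined below):
#
#     db = DB(dbname='unittest', host=None, port=5432)
#     row = db.insert('fruits', name='apple')  # returns the new row as a dict
#     row = db.get('fruits', row['id'])  # fetch it back by primary key
#     db.query('select * from fruits').getresult()  # rows as list of tuples
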
dbname = 'unittest'
dbhost = None
dbport = 5432

try:
    from .LOCAL_PyGreSQL import *
except (ImportError, ValueError):
    try:
        from LOCAL_PyGreSQL import *
    except ImportError:
        pass


class TestClassicTutorial(unittest.TestCase):
    """Test the First Steps Tutorial for the classic interface."""

    def setUp(self):
        """Set up test tables or empty them if they already exist."""
        db = DB(dbname=dbname, host=dbhost, port=dbport)
        db.query("set datestyle to 'iso'")
        db.query("set default_with_oids=false")
        db.query("set standard_conforming_strings=false")
        db.query("set client_min_messages=warning")
        db.query("drop table if exists fruits cascade")
        db.query("create table fruits(id serial primary key, name varchar)")
        self.db = db

    def tearDown(self):
        db = self.db
        db.query("drop table fruits")
        db.close()

    def test_all_steps(self):
        db = self.db
        r = db.get_tables()
        self.assertIsInstance(r, list)
        self.assertIn('public.fruits', r)
        r = db.get_attnames('fruits')
        self.assertIsInstance(r, dict)
        self.assertEqual(r, {'id': 'int', 'name': 'text'})
        r = db.has_table_privilege('fruits', 'insert')
        self.assertTrue(r)
        r = db.insert('fruits', name='apple')
        self.assertIsInstance(r, dict)
        self.assertEqual(r, {'name': 'apple', 'id': 1})
        banana = r = db.insert('fruits', name='banana')
        self.assertIsInstance(r, dict)
        self.assertEqual(r, {'name': 'banana', 'id': 2})
        more_fruits = 'cherimaya durian eggfruit fig grapefruit'.split()
        data = list(enumerate(more_fruits, start=3))
        db.inserttable('fruits', data)
        q = db.query('select * from fruits')
        r = str(q).splitlines()
        self.assertEqual(r[0], 'id|   name   ')
        self.assertEqual(r[1], '--+----------')
        self.assertEqual(r[2], ' 1|apple     ')
        self.assertEqual(r[8], ' 7|grapefruit')
        self.assertEqual(r[9], '(7 rows)')
        q = db.query('select * from fruits')
        r = q.getresult()
        self.assertIsInstance(r, list)
        self.assertIsInstance(r[0], tuple)
        self.assertEqual(r[0], (1, 'apple'))
        self.assertEqual(r[6], (7, 'grapefruit'))
        r = q.dictresult()
        self.assertIsInstance(r, list)
        self.assertIsInstance(r[0], dict)
        self.assertEqual(r[0], {'id': 1, 'name': 'apple'})
        self.assertEqual(r[6], {'id': 7, 'name': 'grapefruit'})
        rows = r = q.namedresult()
        self.assertIsInstance(r, list)
        self.assertIsInstance(r[0], tuple)
        self.assertEqual(rows[3].name, 'durian')
        r = db.update('fruits', banana, name=banana['name'].capitalize())
        self.assertIsInstance(r, dict)
        self.assertEqual(r, {'id': 2, 'name': 'Banana'})
        q = db.query('select * from fruits where id between 1 and 3')
        r = str(q).splitlines()
        self.assertEqual(r[0], 'id|  name   ')
        self.assertEqual(r[1], '--+---------')
        self.assertEqual(r[2], ' 1|apple    ')
        self.assertEqual(r[3], ' 2|Banana   ')
        self.assertEqual(r[4], ' 3|cherimaya')
        self.assertEqual(r[5], '(3 rows)')
        r = db.query('update fruits set name=initcap(name)')
        self.assertIsInstance(r, str)
        self.assertEqual(r, '7')
        r = db.delete('fruits', banana)
        self.assertIsInstance(r, int)
        self.assertEqual(r, 1)
        r = db.delete('fruits', banana)
        self.assertIsInstance(r, int)
        self.assertEqual(r, 0)
        r = db.insert('fruits', banana)
        self.assertIsInstance(r, dict)
        self.assertEqual(r, {'id': 2, 'name': 'Banana'})
        apple = r = db.get('fruits', 1)
        self.assertIsInstance(r, dict)
        self.assertEqual(r, {'name': 'Apple', 'id': 1})
        r = db.insert('fruits', apple, id=8)
        self.assertIsInstance(r, dict)
        self.assertEqual(r, {'id': 8, 'name': 'Apple'})
        r = db.delete('fruits', id=8)
        self.assertIsInstance(r, int)
        self.assertEqual(r, 1)
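
# TestDbApi20Tutorial repeats the same first steps through the DB-API 2.0
# module imported above; the basic pattern is (sketch, illustrative values):
#
#     con = connect(database='unittest', host='localhost:5432')
#     cur = con.cursor()
#     cur.execute("insert into fruits (name) values (%s)", ('apple',))
#     con.commit()
#     cur.execute("select * from fruits")
#     rows = cur.fetchall()  # rows come back as named tuples
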

class TestDbApi20Tutorial(unittest.TestCase):
    """Test the First Steps Tutorial for the DB-API 2.0 interface."""

    def setUp(self):
        """Set up test tables or empty them if they already exist."""
        database = dbname
        host = '%s:%d' % (dbhost or '', dbport or -1)
        con = connect(database=database, host=host)
        cur = con.cursor()
        cur.execute("set datestyle to 'iso'")
        cur.execute("set default_with_oids=false")
        cur.execute("set standard_conforming_strings=false")
        cur.execute("set client_min_messages=warning")
        cur.execute("drop table if exists fruits cascade")
        cur.execute("create table fruits(id serial primary key, name varchar)")
        cur.close()
        self.con = con

    def tearDown(self):
        con = self.con
        cur = con.cursor()
        cur.execute("drop table fruits")
        cur.close()
        con.close()

    def test_all_steps(self):
        con = self.con
        cursor = con.cursor()
        cursor.execute("insert into fruits (name) values ('apple')")
        cursor.execute("insert into fruits (name) values (%s)", ('banana',))
        more_fruits = 'cherimaya durian eggfruit fig grapefruit'.split()
        parameters = [(name,) for name in more_fruits]
        cursor.executemany("insert into fruits (name) values (%s)", parameters)
        con.commit()
        cursor.execute('select * from fruits where id=1')
        r = cursor.fetchone()
        self.assertIsInstance(r, tuple)
        self.assertEqual(len(r), 2)
        r = str(r)
        self.assertEqual(r, "Row(id=1, name='apple')")
        cursor.execute('select * from fruits')
        r = cursor.fetchall()
        self.assertIsInstance(r, list)
        self.assertEqual(len(r), 7)
        self.assertEqual(str(r[0]), "Row(id=1, name='apple')")
        self.assertEqual(str(r[6]), "Row(id=7, name='grapefruit')")
        cursor.execute('select * from fruits')
        r = cursor.fetchmany(2)
        self.assertIsInstance(r, list)
        self.assertEqual(len(r), 2)
        self.assertEqual(str(r[0]), "Row(id=1, name='apple')")
        self.assertEqual(str(r[1]), "Row(id=2, name='banana')")


if __name__ == '__main__':
    unittest.main()


pygresql-5.1.2/tox.ini

# config file for tox

[tox]
envlist = py{26,27,33,34,35,36,37,38}

[testenv]
deps =
    py26: unittest2
commands =
    python setup.py clean --all build_ext --force --inplace --strict
    py26: unit2 discover {posargs}
    py{27,33,34,35,36,37,38}: python -m unittest discover {posargs}
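
# Run the whole matrix with plain `tox`, or a single environment only,
# e.g. `tox -e py38` (requires that interpreter to be installed locally).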