pax_global_header00006660000000000000000000000064145070635060014520gustar00rootroot0000000000000052 comment=9bc5a1ec81ef15595645012e9493c26fd96333a9 PyGreSQL-PyGreSQL-166b135/000077500000000000000000000000001450706350600150035ustar00rootroot00000000000000PyGreSQL-PyGreSQL-166b135/.clang-format000066400000000000000000000012341450706350600173560ustar00rootroot00000000000000# A clang-format style that approximates Python's PEP 7 # Useful for IDE integration # # Based on Paul Ganssle's version at # https://gist.github.com/pganssle/0e3a5f828b4d07d79447f6ced8e7e4db BasedOnStyle: Google AlwaysBreakAfterReturnType: All AllowShortIfStatementsOnASingleLine: false AlignAfterOpenBracket: Align AlignTrailingComments: true BreakBeforeBraces: Stroustrup ColumnLimit: 79 DerivePointerAlignment: false IndentWidth: 4 Language: Cpp PointerAlignment: Right ReflowComments: true SpaceBeforeParens: ControlStatements SpacesInParentheses: false TabWidth: 4 UseCRLF: false UseTab: Never StatementMacros: - Py_BEGIN_ALLOW_THREADS - Py_END_ALLOW_THREADSPyGreSQL-PyGreSQL-166b135/.devcontainer/000077500000000000000000000000001450706350600175425ustar00rootroot00000000000000PyGreSQL-PyGreSQL-166b135/.devcontainer/dev.env000066400000000000000000000002441450706350600210320ustar00rootroot00000000000000PGHOST=pg15 PGPORT=5432 PGDATABASE=test PGUSER=test PGPASSWORD=test PYGRESQL_DB=test PYGRESQL_HOST=pg15 PYGRESQL_PORT=5432 PYGRESQL_USER=test PYGRESQL_PASSWD=test PyGreSQL-PyGreSQL-166b135/.devcontainer/devcontainer.json000066400000000000000000000040471450706350600231230ustar00rootroot00000000000000// For format details, see https://aka.ms/devcontainer.json. For config options, see the // README at: https://github.com/devcontainers/templates/tree/main/src/ubuntu { "name": "PyGreSQL", // Or use a Dockerfile or Docker Compose file. More info: https://containers.dev/guide/dockerfile "dockerComposeFile": "docker-compose.yml", "service": "dev", "workspaceFolder": "/workspace", "customizations": { "vscode": { // Set *default* container specific settings.json values on container create. "settings": { "terminal.integrated.profiles.linux": { "bash": { "path": "/bin/bash" } }, "sqltools.connections": [ { "name": "Container database", "driver": "PostgreSQL", "previewLimit": 50, "server": "pg15", "port": 5432, "database": "test", "username": "test", "password": "test" } ], "python.pythonPath": "/usr/local/bin/python", "python.analysis.typeCheckingMode": "basic", "python.testing.unittestEnabled": true, "editor.formatOnSave": true, "editor.renderWhitespace": "all", "editor.rulers": [ 79 ] }, // Add the IDs of extensions you want installed when the container is created. "extensions": [ "ms-azuretools.vscode-docker", "ms-python.python", "ms-vscode.cpptools", "mtxr.sqltools", "njpwerner.autodocstring", "redhat.vscode-yaml", "eamodio.gitlens", "charliermarsh.ruff", "streetsidesoftware.code-spell-checker", "lextudio.restructuredtext" ] } }, // Features to add to the dev container. More info: https://containers.dev/features. // "features": {}, // Use 'forwardPorts' to make a list of ports inside the container available locally. // "forwardPorts": [], // Use 'postCreateCommand' to run commands after the container is created. "postCreateCommand": "bash /workspace/.devcontainer/provision.sh" // Configure tool-specific properties. // "customizations": {}, // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root. 
// "remoteUser": "root" }PyGreSQL-PyGreSQL-166b135/.devcontainer/provision.sh000066400000000000000000000054041450706350600221310ustar00rootroot00000000000000#!/usr/bin/bash # install development environment for PyGreSQL export DEBIAN_FRONTEND=noninteractive sudo apt-get update sudo apt-get -y upgrade # install base utilities and configure time zone sudo ln -fs /usr/share/zoneinfo/UTC /etc/localtime sudo apt-get install -y apt-utils software-properties-common sudo apt-get install -y tzdata sudo dpkg-reconfigure --frontend noninteractive tzdata sudo apt-get install -y rpm wget zip # install all supported Python versions sudo add-apt-repository -y ppa:deadsnakes/ppa sudo apt-get update sudo apt-get install -y python3.7 python3.7-dev python3.7-distutils sudo apt-get install -y python3.8 python3.8-dev python3.8-distutils sudo apt-get install -y python3.9 python3.9-dev python3.9-distutils sudo apt-get install -y python3.10 python3.10-dev python3.10-distutils sudo apt-get install -y python3.11 python3.11-dev python3.11-distutils sudo apt-get install -y python3.12 python3.12-dev python3.12-distutils # install build and testing tool python3.7 -m pip install -U pip setuptools wheel build python3.8 -m pip install -U pip setuptools wheel build python3.9 -m pip install -U pip setuptools wheel build python3.10 -m pip install -U pip setuptools wheel build python3.11 -m pip install -U pip setuptools wheel build pip install ruff sudo apt-get install -y tox clang-format pip install -U tox # install PostgreSQL client tools sudo apt-get install -y postgresql libpq-dev for pghost in pg10 pg12 pg14 pg15 pg16 do export PGHOST=$pghost export PGDATABASE=postgres export PGUSER=postgres export PGPASSWORD=postgres createdb -E UTF8 -T template0 test createdb -E SQL_ASCII -T template0 test_ascii createdb -E LATIN1 -l C -T template0 test_latin1 createdb -E LATIN9 -l C -T template0 test_latin9 createdb -E ISO_8859_5 -l C -T template0 test_cyrillic psql -c "create user test with password 'test'" psql -c "grant create on database test to test" psql -c "grant create on database test_ascii to test" psql -c "grant create on database test_latin1 to test" psql -c "grant create on database test_latin9 to test" psql -c "grant create on database test_cyrillic to test" psql -c "grant create on schema public to test" test psql -c "grant create on schema public to test" test_ascii psql -c "grant create on schema public to test" test_latin1 psql -c "grant create on schema public to test" test_latin9 psql -c "grant create on schema public to test" test_cyrillic psql -c "create extension hstore" test psql -c "create extension hstore" test_ascii psql -c "create extension hstore" test_latin1 psql -c "create extension hstore" test_latin9 psql -c "create extension hstore" test_cyrillic done export PGDATABASE=test export PGUSER=test export PGPASSWORD=test PyGreSQL-PyGreSQL-166b135/.gitattributes000066400000000000000000000004751450706350600177040ustar00rootroot00000000000000* text=auto eol=lf *.bat text eol=crlf *.css text eol=lf *.html text eol=lf *.ini text eol=lf *.py text eol=lf *.raw text eol=lf *.rst text eol=lf *.sh text eol=lf *.txt text eol=lf *.yml text eol=lf *.gif binary *.ico binary *.jpg binary *.png binary *.exe binary *.so binary *.pdf binary *.gz binary *.zip binary 
PyGreSQL-PyGreSQL-166b135/.github/
PyGreSQL-PyGreSQL-166b135/.github/workflows/
PyGreSQL-PyGreSQL-166b135/.github/workflows/docs.yml
name: Publish PyGreSQL documentation

on:
  push:
    branches:
      - main

jobs:
  build:
    runs-on: ubuntu-22.04

    steps:
      - name: Check out repository
        uses: actions/checkout@v4

      - name: Set up Python 3.11
        uses: actions/setup-python@v4
        with:
          python-version: 3.11

      - name: Install dependencies
        run: |
          sudo apt install libpq-dev
          python -m pip install --upgrade pip
          pip install .
          pip install "sphinx>=7,<8"

      - name: Create docs with Sphinx
        run: |
          cd docs
          make html

      - name: Deploy docs to GitHub pages
        uses: peaceiris/actions-gh-pages@v3
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          publish_branch: gh-pages
          publish_dir: docs/_build/html
          cname: pygresql.org
          enable_jekyll: false
          force_orphan: true

PyGreSQL-PyGreSQL-166b135/.github/workflows/lint.yml
name: Run PyGreSQL quality checks

on:
  push:
  pull_request:

jobs:
  checks:
    name: Quality checks run
    runs-on: ubuntu-22.04
    strategy:
      fail-fast: false

    steps:
      - name: Check out repository
        uses: actions/checkout@v4

      - name: Install tox
        run: pip install tox

      - name: Setup Python
        uses: actions/setup-python@v4
        with:
          python-version: 3.11

      - name: Run quality checks
        run: tox -e ruff,mypy,cformat,docs
        timeout-minutes: 5

PyGreSQL-PyGreSQL-166b135/.github/workflows/tests.yml
name: Run PyGreSQL test matrix

# this has been shamelessly copied from Psycopg

on:
  push:
  pull_request:

jobs:
  tests:
    name: Unit tests run
    runs-on: ubuntu-22.04
    strategy:
      fail-fast: false
      matrix:
        include:
          - { python: "3.7", postgres: "11" }
          - { python: "3.8", postgres: "12" }
          - { python: "3.9", postgres: "13" }
          - { python: "3.10", postgres: "14" }
          - { python: "3.11", postgres: "15" }
          - { python: "3.12", postgres: "16" }

          # Opposite extremes of the supported Py/PG range, other architecture
          - { python: "3.7", postgres: "16", architecture: "x86" }
          - { python: "3.8", postgres: "15", architecture: "x86" }
          - { python: "3.9", postgres: "14", architecture: "x86" }
          - { python: "3.10", postgres: "13", architecture: "x86" }
          - { python: "3.11", postgres: "12", architecture: "x86" }
          - { python: "3.12", postgres: "11", architecture: "x86" }

    env:
      PYGRESQL_DB: test
      PYGRESQL_HOST: 127.0.0.1
      PYGRESQL_USER: test
      PYGRESQL_PASSWD: test

    services:
      postgresql:
        image: postgres:${{ matrix.postgres }}
        env:
          POSTGRES_USER: test
          POSTGRES_PASSWORD: test
        ports:
          - 5432:5432
        # Set health checks to wait until postgres has started
        options: >-
          --health-cmd pg_isready
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5

    steps:
      - name: Check out repository
        uses: actions/checkout@v4

      - name: Install tox
        run: pip install tox

      - name: Setup Python
        uses: actions/setup-python@v4
        with:
          python-version: ${{ matrix.python }}

      - name: Run tests
        env:
          MATRIX_PYTHON: ${{ matrix.python }}
        run: tox -e py${MATRIX_PYTHON/./}
        timeout-minutes: 5

PyGreSQL-PyGreSQL-166b135/.gitignore
*~
*.bak
*.cache
*.dll
*.egg-info
*.log
*.patch
*.pid
*.pstats
*.py[co]
*.so
*.swp
__pycache__/
build/
dist/
_build/
_build_doctrees/
/local/
/tests/LOCAL_*.py
docker-compose.yml
Dockerfile
Vagrantfile
Vagrantfile-*
.coverage
.tox/
.venv/
.vagrant/
.vagrant-*/
Thumbs.db
.DS_Store
.idea/
.vs/
.vscode/

PyGreSQL-PyGreSQL-166b135/.readthedocs.yaml
# .readthedocs.yaml
# Read the Docs configuration file
# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details

# Required
version: 2

# Set the version of Python and other tools you might need
build:
  os: ubuntu-22.04
  tools:
    python: "3.11"

# Build documentation in the docs/ directory with Sphinx
sphinx:
  configuration: docs/conf.py

# We recommend specifying your dependencies to enable reproducible builds:
# https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html
python:
  install:
    - requirements: docs/requirements.txt

PyGreSQL-PyGreSQL-166b135/LICENSE.txt
Written by D'Arcy J.M. Cain (darcy@PyGreSQL.org)

Based heavily on code written by Pascal Andre (andre@chimay.via.ecp.fr)

Copyright (c) 1995, Pascal Andre

Further modifications copyright (c) 1997-2008 by D'Arcy J.M. Cain

Further modifications copyright (c) 2009-2023 by the PyGreSQL Development Team

PyGreSQL is released under the PostgreSQL License, a liberal Open Source
license, similar to the BSD or MIT licenses:

Permission to use, copy, modify, and distribute this software and its
documentation for any purpose, without fee, and without a written agreement
is hereby granted, provided that the above copyright notice and this
paragraph and the following two paragraphs appear in all copies.

In this license the term "AUTHORS" refers to anyone who has contributed
code to PyGreSQL.

IN NO EVENT SHALL THE AUTHORS BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT,
SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS,
ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF
AUTHORS HAVE BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

THE AUTHORS SPECIFICALLY DISCLAIM ANY WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS IS"
BASIS, AND THE AUTHORS HAVE NO OBLIGATIONS TO PROVIDE MAINTENANCE,
SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.

PyGreSQL-PyGreSQL-166b135/MANIFEST.in
include setup.py
recursive-include pg *.py *.pyi py.typed
recursive-include pgdb *.py py.typed
recursive-include tests *.py
include ext/*.c
include ext/*.h
include README.rst
include LICENSE.txt
include tox.ini
include pyproject.toml
include docs/Makefile
include docs/make.bat
include docs/*.py
include docs/*.rst
include docs/*.txt
exclude docs/index.rst
recursive-include docs/community *.rst
recursive-include docs/contents *.rst
recursive-include docs/download *.rst
recursive-include docs/_static *.ico *.png

PyGreSQL-PyGreSQL-166b135/README.rst
PyGreSQL - Python interface for PostgreSQL
==========================================

PyGreSQL is a Python module that interfaces to a PostgreSQL database.
It wraps the lower level C API library libpq to allow easy use of the
powerful PostgreSQL features from Python.

PyGreSQL should run on most platforms where PostgreSQL and Python are
running. It is based on the PyGres95 code written by Pascal Andre.
D'Arcy J. M. Cain renamed it to PyGreSQL starting with version 2.0
and serves as the "BDFL" of PyGreSQL.
Christoph Zwerschke volunteered as another maintainer and has been
the main contributor since version 3.7 of PyGreSQL.

The following Python versions are supported:

* PyGreSQL 4.x and earlier: Python 2 only
* PyGreSQL 5.x: Python 2 and Python 3
* PyGreSQL 6.x and newer: Python 3 only

The current version of PyGreSQL supports Python versions 3.7 to 3.12
and PostgreSQL versions 10 to 16 on the server.

Installation
------------

The simplest way to install PyGreSQL is to type::

    $ pip install PyGreSQL

For other ways of installing PyGreSQL and requirements,
see the documentation.

Note that PyGreSQL also requires the libpq shared library to be
installed and accessible on the client machine.

Documentation
-------------

The documentation is available at
`pygresql.github.io/ <https://pygresql.github.io/>`_ and at
`pygresql.readthedocs.io <https://pygresql.readthedocs.io/>`_,
where you can also find the documentation for older versions.

PyGreSQL-PyGreSQL-166b135/docs/
PyGreSQL-PyGreSQL-166b135/docs/Makefile
# Minimal makefile for Sphinx documentation
#
# You can set these variables from the command line, and also
# from the environment for the first two.
SPHINXOPTS    ?=
SPHINXBUILD   ?= sphinx-build
SOURCEDIR     = .
BUILDDIR      = _build

# Put it first so that "make" without argument is like "make help".
help:
	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)

.PHONY: help Makefile

# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile
	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)

PyGreSQL-PyGreSQL-166b135/docs/_static/
PyGreSQL-PyGreSQL-166b135/docs/_static/favicon.ico
[binary ICO image data omitted: PyGreSQL favicon]
PyGreSQL-PyGreSQL-166b135/docs/_static/pygresql.png
[binary PNG image data omitted: PyGreSQL logo]
PyGreSQL-PyGreSQL-166b135/docs/about.rst
About PyGreSQL
==============

**PyGreSQL** is an *open-source* `Python <http://www.python.org>`_ module
that interfaces to a `PostgreSQL <http://www.postgresql.org>`_ database.
It wraps the lower level C API library libpq to allow easy use of the
powerful PostgreSQL features from Python.

| This software is copyright © 1995, Pascal Andre.
| Further modifications are copyright © 1997-2008 by D'Arcy J.M. Cain.
| Further modifications are copyright © 2009-2023 by the PyGreSQL team.
| For licensing details, see the full :doc:`copyright`.

**PostgreSQL** is a highly scalable, SQL compliant, open source
object-relational database management system. With more than 20 years
of development history, it is quickly becoming the de facto database
for enterprise level open source solutions. Best of all, PostgreSQL's
source code is available under the most liberal open source license:
the BSD license.

**Python**

Python is an interpreted, interactive, object-oriented programming
language. It is often compared to Tcl, Perl, Scheme or Java. Python
combines remarkable power with very clear syntax. It has modules,
classes, exceptions, very high level dynamic data types, and dynamic
typing. There are interfaces to many system calls and libraries, as
well as to various windowing systems (X11, Motif, Tk, Mac, MFC).
New built-in modules are easily written in C or C++. Python is also
usable as an extension language for applications that need a
programmable interface. The Python implementation is copyrighted but
freely usable and distributable, even for commercial use.

**PyGreSQL** is a Python module that interfaces to a PostgreSQL database.
It wraps the lower level C API library libpq to allow easy use of the
powerful PostgreSQL features from Python.

PyGreSQL is developed and tested on a NetBSD system, but it also runs on
most other platforms where PostgreSQL and Python are running. It is based
on the PyGres95 code written by Pascal Andre (andre@chimay.via.ecp.fr).
D'Arcy (darcy@druid.net) renamed it to PyGreSQL starting with version 2.0
and serves as the "BDFL" of PyGreSQL.

The current version, PyGreSQL |version|, needs PostgreSQL 10 to 16, and
Python 3.7 to 3.12. If you need to support older PostgreSQL or Python
versions, you can resort to the PyGreSQL 5.x versions that still
support them.

PyGreSQL-PyGreSQL-166b135/docs/community/
PyGreSQL-PyGreSQL-166b135/docs/community/homes.rst
Project home sites
------------------

**Python**: http://www.python.org

**PostgreSQL**: http://www.postgresql.org

**PyGreSQL**: http://www.pygresql.org
PyGreSQL-PyGreSQL-166b135/docs/community/index.rst
PyGreSQL Development and Support
================================

PyGreSQL is an open-source project created by a group of volunteers.
The project and the development infrastructure are currently maintained
by D'Arcy J.M. Cain. We would be glad to welcome more contributors so
that PyGreSQL can be further developed, modernized and improved.

..
include:: mailinglist.rst .. include:: source.rst .. include:: issues.rst .. include:: support.rst .. include:: homes.rst PyGreSQL-PyGreSQL-166b135/docs/community/issues.rst000066400000000000000000000002241450706350600220220ustar00rootroot00000000000000Issue Tracker ------------- Bug reports and enhancement requests can be posted as `GitHub issues `_. PyGreSQL-PyGreSQL-166b135/docs/community/mailinglist.rst000066400000000000000000000005611450706350600230270ustar00rootroot00000000000000Mailing list ------------ You can join `the mailing list `_ to discuss future development of the PyGreSQL interface or if you have questions or problems with PyGreSQL that are not covered in the :doc:`documentation <../contents/index>`. This is usually a low volume list except when there are new features being added. PyGreSQL-PyGreSQL-166b135/docs/community/source.rst000066400000000000000000000014351450706350600220140ustar00rootroot00000000000000Access to the source repository ------------------------------- The source code of PyGreSQL is available as a `Git `_ repository on `GitHub `_. The current main branch of the repository can be cloned with the command:: git clone https://github.com/PyGreSQL/PyGreSQL.git You can also download the main branch as a `zip archive `_. Contributions can be proposed as `pull requests `_ on GitHub. Before starting to work on larger contributions, please discuss with the core developers using the `mailing list `_ or in a `GitHub issues `_. PyGreSQL-PyGreSQL-166b135/docs/community/support.rst000066400000000000000000000014171450706350600222300ustar00rootroot00000000000000Support ------- **Python**: see http://www.python.org/community/ **PostgreSQL**: see http://www.postgresql.org/support/ **PyGreSQL**: Join `the PyGreSQL mailing list `_ if you need help regarding PyGreSQL. You can also ask questions regarding PyGreSQL on `Stack Overflow `_. Please use `GitHub issues `_ only for bug reports and enhancement requests, not for questions about usage of PyGreSQL. Please note that messages to individual developers will generally not be answered directly. All questions, comments and code changes must be submitted to the mailing list for peer review and archiving purposes. PyGreSQL-PyGreSQL-166b135/docs/conf.py000066400000000000000000000062011450706350600172310ustar00rootroot00000000000000# Configuration file for the Sphinx documentation builder. # # For the full list of built-in configuration values, see the documentation: # https://www.sphinx-doc.org/en/master/usage/configuration.html # -- Project information ----------------------------------------------------- # https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information project = 'PyGreSQL' author = 'The PyGreSQL team' copyright = '2023, ' + author def project_version(): with open('../pyproject.toml') as f: for d in f: if d.startswith("version ="): version = d.split("=")[1].strip().strip('"') return version raise Exception("Cannot determine PyGreSQL version") version = release = project_version() language = 'en' # -- General configuration --------------------------------------------------- # https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration extensions = ['sphinx.ext.autodoc'] templates_path = ['_templates'] exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] # List of pages which are included in other pages and therefore should # not appear in the toctree. 
exclude_patterns += [ 'download/download.rst', 'download/files.rst', 'community/mailinglist.rst', 'community/source.rst', 'community/issues.rst', 'community/support.rst', 'community/homes.rst'] # ignore certain warnings # (references to some of the Python names do not resolve correctly) nitpicky = True nitpick_ignore = [ ('py:' + t, n) for t, names in { 'attr': ('arraysize', 'error', 'sqlstate', 'DatabaseError.sqlstate'), 'class': ('bool', 'bytes', 'callable', 'callables', 'class', 'dict', 'float', 'function', 'int', 'iterable', 'list', 'object', 'set', 'str', 'tuple', 'False', 'True', 'None', 'namedtuple', 'namedtuples', 'decimal.Decimal', 'bytes/str', 'list of namedtuples', 'tuple of callables', 'first field', 'type of first field', 'Notice', 'DATETIME'), 'data': ('defbase', 'defhost', 'defopt', 'defpasswd', 'defport', 'defuser'), 'exc': ('Exception', 'IndexError', 'IOError', 'KeyError', 'MemoryError', 'SyntaxError', 'TypeError', 'ValueError', 'pg.InternalError', 'pg.InvalidResultError', 'pg.MultipleResultsError', 'pg.NoResultError', 'pg.OperationalError', 'pg.ProgrammingError'), 'func': ('len', 'json.dumps', 'json.loads'), 'meth': ('datetime.strptime', 'cur.execute', 'DB.close', 'DB.connection_handler', 'DB.get_regtypes', 'DB.inserttable', 'DB.reopen'), 'obj': ('False', 'True', 'None') }.items() for n in names] # -- Options for HTML output ------------------------------------------------- # https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output html_theme = 'alabaster' html_static_path = ['_static'] html_title = f'PyGreSQL {version}' html_logo = '_static/pygresql.png' html_favicon = '_static/favicon.ico' PyGreSQL-PyGreSQL-166b135/docs/contents/000077500000000000000000000000001450706350600175705ustar00rootroot00000000000000PyGreSQL-PyGreSQL-166b135/docs/contents/changelog.rst000066400000000000000000001113051450706350600222520ustar00rootroot00000000000000ChangeLog ========= Version 6.0 (2023-10-03) ------------------------ - Tested with the recent releases of Python 3.12 and PostgreSQL 16. - Make pyproject.toml the only source of truth for the version number. - Please also note the changes already made in version 6.0b1. Version 6.0b1 (2023-09-06) -------------------------- - Officially support Python 3.12 and PostgreSQL 16 (tested with rc versions). - Removed support for Python versions older than 3.7 (released June 2017) and PostgreSQL older than version 10 (released October 2017). - Converted the standalone modules `pg` and `pgdb` to packages with several submodules each. The C extension module is now part of the `pg` package and wrapped into the pure Python module `pg.core`. - Added type hints and included a stub file for the C extension module. - Added method `pkeys()` to the `pg.DB` object. - Removed deprecated function `pg.pgnotify()`. - Removed deprecated method `ntuples()` of the `pg.Query` object. - Renamed `pgdb.Type` to `pgdb.DbType` to avoid confusion with `typing.Type`. - `pg` and `pgdb` now use a shared row factory cache. - The function `set_row_factory_size()` has been removed. The row cache is now available as a `RowCache` class with methods `change_size()` and `clear()`. - Modernized code and tools for development, testing, linting and building. Version 5.2.5 (2023-08-28) -------------------------- - This version officially supports the new Python 3.11 and PostgreSQL 15. 
- Two more improvements in the `inserttable()` method of the `pg` module (thanks to Justin Pryzby for this contribution): - error handling has been improved (#72) - the method now returns the number of inserted rows (#73) - Another improvement in the `pg` module (#83): - generated columns can be requested with the `get_generated()` method - generated columns are ignored by the insert, update and upsert method - Avoid internal query and error when casting the `sql_identifier` type (#82) - Fix issue with multiple calls of `getresult()` after `send_query()` (#80) Version 5.2.4 (2022-03-26) -------------------------- - Three more fixes in the `inserttable()` method of the `pg` module: - `inserttable()` failed to escape carriage return (#68) - Allow larger row sizes up to 64 KB (#69) - Fix use after free issue in `inserttable()` (#71) - Replace obsolete functions for copy used internally (#59). Therefore, `getline()` now does not return `\.` at the end any more. Version 5.2.3 (2022-01-30) -------------------------- - This version officially supports the new Python 3.10 and PostgreSQL 14. - Some improvements and fixes in the `inserttable()` method of the `pg` module: - Sync with `PQendcopy()` when there was an error (#60) - Allow specifying a schema in the table name (#61) - Improved check for internal result (#62) - Catch buffer overflows when building the copy command - Data can now be passed as an iterable, not just list or tuple (#66) - Some more fixes in the `pg` module: - Fix upsert with limited number of columns (#58). - Fix argument handling of `is/set_non_blocking()`. - Add missing `get/set_typecasts` in list of exports. - Fixed a reference counting issue when casting JSON columns (#57). Version 5.2.2 (2020-12-09) -------------------------- - Added a missing adapter method for UUIDs in the classic `pg` module. - Performance optimizations for `fetchmany()` in the `pgdb` module (#51). - Fixed a reference counting issue in the `cast_array/record` methods (#52). - Ignore incompatible libpq.dll in Windows PATH for Python >= 3.8 (#53). Version 5.2.1 (2020-09-25) -------------------------- - This version officially supports the new Python 3.9 and PostgreSQL 13. - The `copy_to()` and `copy_from()` methods in the pgdb module now also work with table names containing schema qualifiers (#47). Version 5.2 (2020-06-21) ------------------------ - We now require Python version 2.7 or 3.5 and newer. - All Python code is now tested with flake8 and made PEP8 compliant. - Changes to the classic PyGreSQL module (pg): - New module level function `get_pqlib_version()` that gets the version of the pqlib used by PyGreSQL (needs PostgreSQL >= 9.1 on the client). - New query method `memsize()` that gets the memory size allocated by the query (needs PostgreSQL >= 12 on the client). - New query method `fieldinfo()` that gets name and type information for one or all field(s) of the query. Contributed by Justin Pryzby (#39). - Experimental support for asynchronous command processing. Additional connection parameter `nowait`, and connection methods `send_query()`, `poll()`, `set_non_blocking()`, `is_non_blocking()`. Generously contributed by Patrick TJ McPhee (#19). - The `types` parameter of `format_query` can now be passed as a string that will be split on whitespace when values are passed as a sequence, and the types can now also be specified using actual Python types instead of type names. Suggested by Justin Pryzby (#38). 
- The `inserttable()` method now accepts an optional column list that will be passed on to the COPY command. Contributed by Justin Pryzby (#24). - The `DBTypes` class now also includes the `typlen` attribute with information about the size of the type (contributed by Justin Pryzby). - Large objects on the server are not closed any more when they are deallocated as Python objects, since this could cause several problems. Bug report and analysis by Justin Pryzby (#30). - Changes to the DB-API 2 module (pgdb): - When using Python 2, errors are now derived from StandardError instead of Exception, as required by the DB-API 2 compliance test. - Connection arguments containing single quotes caused problems (reported and fixed by Tyler Ramer and Jamie McAtamney). Version 5.1.2 (2020-04-19) -------------------------- - Improved handling of build_ext options for disabling certain features. - Avoid compiler warnings with proper casts. This should solve problems when building PyGreSQL on MaCOS. - Export only the public API on wildcard imports Version 5.1.1 (2020-03-05) -------------------------- - This version officially supports the new Python 3.8 and PostgreSQL 12. - This version changes internal queries so that they cannot be exploited using a PostgreSQL security vulnerability described as CVE-2018-1058. - Removed NO_PQSOCKET switch which is not needed any longer. - Fixed documentation for other compilation options which had been renamed. - Started using GitHub as development platform. Version 5.1 (2019-05-17) ------------------------ - Changes to the classic PyGreSQL module (pg): - Support for prepared statements (following a suggestion and first implementation by Justin Pryzby on the mailing list). - DB wrapper objects based on existing connections can now be closed and reopened properly (but the underlying connection will not be affected). - The query object can now be used as an iterator similar to query.getresult() and will then yield the rows as tuples. Thanks to Justin Pryzby for the proposal and most of the implementation. - Deprecated query.ntuples() in the classic API, since len(query) can now be used and returns the same number. - The i-th row of the result can now be accessed as `query[i]`. - New method query.scalarresult() that gets only the first field of each row as a list of scalar values. - New methods query.one(), query.onenamed(), query.onedict() and query.onescalar() that fetch only one row from the result or None if there are no more rows, similar to the cursor.fetchone() method in DB-API 2. - New methods query.single(), query.singlenamed(), query.singledict() and query.singlescalar() that fetch only one row from the result, and raise an error if the result does not have exactly one row. - New methods query.dictiter(), query.namediter() and query.scalariter() returning the same values as query.dictresult(), query.namedresult() and query.scalarresult(), but as iterables instead of lists. This avoids creating a Python list of all results and can be slightly more efficient. - Removed pg.get/set_namedresult. You can configure the named tuples factory with the pg.set_row_factory_size() function and change the implementation with pg.set_query_helpers(), but this is not recommended and this function is not part of the official API. - Added new connection attributes `socket`, `backend_pid`, `ssl_in_use` and `ssl_attributes` (the latter need PostgreSQL >= 9.5 on the client). 
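
For illustration, here is a minimal sketch combining several of the
classic API additions named above (query iteration, ``len(query)``,
indexing, and the ``one*()`` methods); the connection parameters and
the ``fruits`` table are hypothetical, not part of this changelog::

    import pg

    db = pg.DB(dbname='test')  # hypothetical connection parameters
    q = db.query('select name, city from fruits')  # hypothetical table

    print(len(q))  # number of rows, can be used instead of q.ntuples()
    print(q[0])    # the i-th row of the result can be accessed by index
    for row in q:  # the query object is iterable, yielding row tuples
        print(row)

    # fetch only one row (or None if there are none), similar to DB-API 2
    row = db.query('select name, city from fruits limit 1').onenamed()
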
- Changes to the DB-API 2 module (pgdb): - Connections now have an `autocommit` attribute which is set to `False` by default but can be set to `True` to switch to autocommit mode where no transactions are started and calling commit() is not required. Note that this is not part of the DB-API 2 standard. Version 5.0.7 (2019-05-17) -------------------------- - This version officially supports the new PostgreSQL 11. - Fixed a bug in parsing array subscript ranges (reported by Justin Pryzby). - Fixed an issue when deleting a DB wrapper object with the underlying connection already closed (bug report by Jacob Champion). Version 5.0.6 (2018-07-29) -------------------------- - This version officially supports the new Python 3.7. - Correct trove classifier for the PostgreSQL License. Version 5.0.5 (2018-04-25) -------------------------- - This version officially supports the new PostgreSQL 10. - The memory for the string with the number of rows affected by a classic pg module query() was already freed (bug report and fix by Peifeng Qiu). Version 5.0.4 (2017-07-23) -------------------------- - This version officially supports the new Python 3.6 and PostgreSQL 9.6. - query_formatted() can now be used without parameters. - The automatic renaming of columns that are invalid as field names of named tuples now works more accurately in Python 2.6 and 3.0. - Fixed error checks for unlink() and export() methods of large objects (bug report by Justin Pryzby). - Fixed a compilation issue under OS X (bug report by Josh Johnston). Version 5.0.3 (2016-12-10) -------------------------- - It is now possible to use a custom array cast function by changing the type caster for the 'anyarray' type. For instance, by calling set_typecast('anyarray', lambda v, c: v) you can have arrays returned as strings instead of lists. Note that in the pg module, you can also call set_array(False) in order to return arrays as strings. - The namedtuple classes used for the rows of query results are now cached and reused internally, since creating namedtuples classes in Python is a somewhat expensive operation. By default the cache has a size of 1024 entries, but this can be changed with the set_row_factory_size() function. In certain cases this change can notably improve the performance. - The namedresult() method in the classic API now also tries to rename columns that would result in invalid field names. Version 5.0.2 (2016-09-13) -------------------------- - Fixed an infinite recursion problem in the DB wrapper class of the classic module that could occur when the underlying connection could not be properly opened (bug report by Justin Pryzby). Version 5.0.1 (2016-08-18) -------------------------- - The update() and delete() methods of the DB wrapper now use the OID instead of the primary key if both are provided. This restores backward compatibility with PyGreSQL 4.x and allows updating the primary key itself if an OID exists. - The connect() function of the DB API 2.0 module now accepts additional keyword parameters such as "application_name" which will be passed on to PostgreSQL. - PyGreSQL now adapts some queries to be able to access older PostgreSQL 8.x databases (as suggested on the mailing list by Andres Mejia). However, these old versions of PostgreSQL are not officially supported and tested any more. - Fixed an issue with Postgres types that have an OID >= 0x80000000 (reported on the mailing list by Justin Pryzby). 
- Allow extra values that are not used in the command in the parameter dict passed to the query_formatted() method (as suggested by Justin Pryzby). - Improved handling of empty arrays in the classic module. - Unused classic connections were not properly garbage collected which could cause memory leaks (reported by Justin Pryzby). - Made C extension compatible with MSVC 9 again (this was needed to compile for Python 2 on Windows). Version 5.0 (2016-03-20) ------------------------ - This version now runs on both Python 2 and Python 3. - The supported versions are Python 2.6 to 2.7, and 3.3 to 3.5. - PostgreSQL is supported in all versions from 9.0 to 9.5. - Changes in the classic PyGreSQL module (pg): - The classic interface got two new methods get_as_list() and get_as_dict() returning a database table as a Python list or dict. The amount of data returned can be controlled with various parameters. - A method upsert() has been added to the DB wrapper class that utilizes the "upsert" feature that is new in PostgreSQL 9.5. The new method nicely complements the existing get/insert/update/delete() methods. - When using insert/update/upsert(), you can now pass PostgreSQL arrays as lists and PostgreSQL records as tuples in the classic module. - Conversely, when the query method returns a PostgreSQL array, it is passed to Python as a list. PostgreSQL records are converted to named tuples as well, but only if you use one of the get/insert/update/delete() methods. PyGreSQL uses a new fast built-in parser to achieve this. The automatic conversion of arrays to lists can be disabled with set_array(False). - The pkey() method of the classic interface now returns tuples instead of frozensets, with the same order of columns as the primary key index. - Like the DB-API 2 module, the classic module now also returns bool values from the database as Python bool objects instead of strings. You can still restore the old behavior by calling set_bool(False). - Like the DB-API 2 module, the classic module now also returns bytea data fetched from the database as byte strings, so you don't need to call unescape_bytea() any more. This has been made configurable though, and you can restore the old behavior by calling set_bytea_escaped(True). - A method set_jsondecode() has been added for changing or removing the function that automatically decodes JSON data coming from the database. By default, decoding JSON is now enabled and uses the decoder function in the standard library with its default parameters. - The table name that is affixed to the name of the OID column returned by the get() method of the classic interface will not automatically be fully qualified any more. This reduces overhead from the interface, but it means you must always write the table name in the same way when you are using tables with OIDs and call methods that make use of these. Also, OIDs are now only used when access via primary key is not possible. Note that OIDs are considered deprecated anyway, and they are not created by default any more in PostgreSQL 8.1 and later. - The internal caching and automatic quoting of class names in the classic interface has been simplified and improved, it should now perform better and use less memory. Also, overhead for quoting values in the DB wrapper methods has been reduced and security has been improved by passing the values to libpq separately as parameters instead of inline. 
- It is now possible to use the registered type names instead of the more coarse-grained type names that are used by default in PyGreSQL, without breaking any of the mechanisms for quoting and typecasting, which rely on the type information. This is achieved while maintaining simplicity and backward compatibility by augmenting the type name string objects with all the necessary information under the cover. To switch registered type names on or off (this is the default), call the DB wrapper method use_regtypes(). - A new method query_formatted() has been added to the DB wrapper class that allows using the format specifications from Python. A flag "inline" can be set to specify whether parameters should be sent to the database separately or formatted into the SQL. - A new type helper Bytea() has been added. - Changes in the DB-API 2 module (pgdb): - The DB-API 2 module now always returns result rows as named tuples instead of simply lists as before. The documentation explains how you can restore the old behavior or use custom row objects instead. - Various classes used by the classic and DB-API 2 modules have been renamed to become simpler, more intuitive and in line with the names used in the DB-API 2 documentation. Since the API provides objects of these types only through constructor functions, this should not cause any incompatibilities. - The DB-API 2 module now supports the callproc() cursor method. Note that output parameters are currently not replaced in the return value. - The DB-API 2 module now supports copy operations between data streams on the client and database tables via the COPY command of PostgreSQL. The cursor method copy_from() can be used to copy data from the database to the client, and the cursor method copy_to() can be used to copy data from the client to the database. - The 7-tuples returned by the description attribute of a pgdb cursor are now named tuples, i.e. their elements can be also accessed by name. The column names and types can now also be requested through the colnames and coltypes attributes, which are not part of DB-API 2 though. The type_code provided by the description attribute is still equal to the PostgreSQL internal type name, but now carries some more information in additional attributes. The size, precision and scale information that is part of the description is now properly set for numeric types. - If you pass a Python list as one of the parameters to a DB-API 2 cursor, it is now automatically bound using an ARRAY constructor. If you pass a Python tuple, it is bound using a ROW constructor. This is useful for passing records as well as making use of the IN syntax. - Inversely, when a fetch method of a DB-API 2 cursor returns a PostgreSQL array, it is passed to Python as a list, and when it returns a PostgreSQL composite type, it is passed to Python as a named tuple. PyGreSQL uses a new fast built-in parser to achieve this. Anonymous composite types are also supported, but yield only an ordinary tuple containing text strings. - New type helpers Interval() and Uuid() have been added. - The connection has a new attribute "closed" that can be used to check whether the connection is closed or broken. - SQL commands are always handled as if they include parameters, i.e. literal percent signs must always be doubled. This consistent behavior is necessary for using pgdb with wrappers like SQLAlchemy. - PyGreSQL 5.0 will be supported as a database driver by SQLAlchemy 1.1. 
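
As a small sketch of the DB-API 2 changes listed above (named tuple
rows and description fields, list parameters bound as arrays); the
connection parameters and the ``fruits`` table are hypothetical::

    import pgdb

    con = pgdb.connect(database='test')  # hypothetical connection parameters
    cur = con.cursor()

    cur.execute('select name, city from fruits')  # hypothetical table
    row = cur.fetchone()  # rows are now returned as named tuples
    print(row.name, row.city)
    print([col.name for col in cur.description])  # description is named, too

    # a Python list parameter is bound using an ARRAY constructor
    cur.execute('select * from fruits where id = any(%s)', [[1, 2, 3]])

    con.close()
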
- Changes concerning both modules: - PyGreSQL now tries to raise more specific and appropriate subclasses of DatabaseError than just ProgrammingError. Particularly, when database constraints are violated, it raises an IntegrityError now. - The modules now provide get_typecast() and set_typecast() methods allowing to control the typecasting on the global level. The connection objects have type caches with the same methods which give control over the typecasting on the level of the current connection. See the documentation for details about the type cache and the typecast mechanisms provided by PyGreSQL. - Dates, times, timestamps and time intervals are now returned as the corresponding Python objects from the datetime module of the standard library. In earlier versions of PyGreSQL they had been returned as strings. You can restore the old behavior by deactivating the respective typecast functions, e.g. set_typecast('date', str). - PyGreSQL now supports the "uuid" data type, converting such columns automatically to and from Python uuid.UUID objects. - PyGreSQL now supports the "hstore" data type, converting such columns automatically to and from Python dictionaries. If you want to insert Python objects as JSON data using DB-API 2, you should wrap them in the new HStore() type constructor as a hint to PyGreSQL. - PyGreSQL now supports the "json" and "jsonb" data types, converting such columns automatically to and from Python objects. If you want to insert Python objects as JSON data using DB-API 2, you should wrap them in the new Json() type constructor as a hint to PyGreSQL. - A new type helper Literal() for inserting parameters literally as SQL has been added. This is useful for table names, for instance. - Fast parsers cast_array(), cast_record() and cast_hstore for the input and output syntax for PostgreSQL arrays, composite types and the hstore type have been added to the C extension module. The array parser also allows using multi-dimensional arrays with PyGreSQL. - The tty parameter and attribute of database connections has been removed since it is not supported by PostgreSQL versions newer than 7.4. Version 4.2.2 (2016-03-18) -------------------------- - The get_relations() and get_tables() methods now also return system views and tables if you set the optional "system" parameter to True. - Fixed a regression when using temporary tables with DB wrapper methods (thanks to Patrick TJ McPhee for reporting). Version 4.2.1 (2016-02-18) -------------------------- - Fixed a small bug when setting the notice receiver. - Some more minor fixes and re-packaging with proper permissions. Version 4.2 (2016-01-21) ------------------------ - The supported Python versions are 2.4 to 2.7. - PostgreSQL is supported in all versions from 8.3 to 9.5. - Set a better default for the user option "escaping-funcs". - Force build to compile with no errors. - New methods get_parameters() and set_parameters() in the classic interface which can be used to get or set run-time parameters. - New method truncate() in the classic interface that can be used to quickly empty a table or a set of tables. - Fix decimal point handling. - Add option to return boolean values as bool objects. - Add option to return money values as string. - get_tables() does not list information schema tables any more. - Fix notification handler (Thanks Patrick TJ McPhee). - Fix a small issue with large objects. - Minor improvements of the NotificationHandler. - Converted documentation to Sphinx and added many missing parts. 
- The tutorial files have become a chapter in the documentation.
- Greatly improved unit testing, tests run with Python 2.4 to 2.7 again.

Version 4.1.1 (2013-01-08)
--------------------------
- Add NotificationHandler class and method. Replaces need for pgnotify.
- Sharpen test for inserting current_timestamp.
- Add more quote tests. False and 0 should evaluate to NULL.
- More tests - Any number other than 0 is True.
- Do not use positional parameters internally.
  This restores backward compatibility with version 4.0.
- Add methods for changing the decimal point.

Version 4.1 (2013-01-01)
------------------------
- Dropped support for Python below 2.5 and PostgreSQL below 8.3.
- Added support for Python up to 2.7 and PostgreSQL up to 9.2.
- Particularly, support PQescapeLiteral() and PQescapeIdentifier().
- The query method of the classic API now supports positional parameters.
  This is an effective way to pass arbitrary or unknown data without
  worrying about SQL injection or syntax errors (contribution by
  Patrick TJ McPhee).
- The classic API now supports a method namedresult() in addition to
  getresult() and dictresult(), which returns the rows of the result
  as named tuples if these are supported (Python 2.6 or higher).
- The classic API has got the new methods begin(), commit(), rollback(),
  savepoint() and release() for handling transactions.
- Both classic and DBAPI 2 connections can now be used as context
  managers for encapsulating transactions.
- The execute() and executemany() methods now return the cursor object,
  so you can now write statements like "for row in cursor.execute(...)"
  (as suggested by Adam Frederick).
- Binary objects are now automatically escaped and unescaped.
- Bug in money quoting fixed. Amounts of $0.00 handled correctly.
- Proper handling of date and time objects as input.
- Proper handling of floats with 'nan' or 'inf' values as input.
- Fixed the set_decimal() function.
- All DatabaseError instances now have a sqlstate attribute.
- The getnotify() method can now also return payload strings (#15).
- Better support for notice processing with the new methods
  set_notice_receiver() and get_notice_receiver()
  (as suggested by Michael Filonenko, see #37).
- Open transactions are rolled back when pgdb connections are closed
  (as suggested by Peter Harris, see #46).
- Connections and cursors can now be used with the "with" statement
  (as suggested by Peter Harris, see #46).
- New method use_regtypes() that can be called to let getattnames()
  return registered type names instead of the simplified classic
  types (#44).

Version 4.0 (2009-01-01)
------------------------
- Dropped support for Python below 2.3 and PostgreSQL below 7.4.
- Improved performance of fetchall() for large result sets by speeding
  up the type casts (as suggested by Peter Schuller).
- Exposed exceptions as attributes of the connection object.
- Exposed connection as attribute of the cursor object.
- Cursors now support the iteration protocol.
- Added new method to get parameter settings.
- Added customizable row_factory as suggested by Simon Pamies.
- Separated between mandatory and additional type objects.
- Added keyword args to insert, update and delete methods.
- Added exception handling for direct copy.
- Start transactions only when necessary, not after every commit().
- Release the GIL while making a connection
  (as suggested by Peter Schuller).
- If available, use decimal.Decimal for numeric types.
- Allow DB wrapper to be used with DB-API 2 connections
  (as suggested by Chris Hilton).
- Made private attributes of DB wrapper accessible. - Dropped dependence on mx.DateTime module. - Support for PQescapeStringConn() and PQescapeByteaConn(); these are now also used by the internal _quote() functions. - Added 'int8' to INTEGER types. New SMALLINT type. - Added a way to find the number of rows affected by a query() with the classic pg module by returning it as a string. For single inserts, query() still returns the oid as an integer. The pgdb module already provides the "rowcount" cursor attribute for the same purpose. - Improved getnotify() by calling PQconsumeInput() instead of submitting an empty command. - Removed compatibility code for old OID munging style. - The insert() and update() methods now use the "returning" clause if possible to get all changed values, and they also check in advance whether a subsequent select is possible, so that ongoing transactions won't break if there is no select privilege. - Added "protocol_version" and "server_version" attributes. - Revived the "user" attribute. - The pg module now works correctly with composite primary keys; these are represented as frozensets. - Removed the undocumented and actually unnecessary "view" parameter from the get() method. - get() raises a nicer ProgrammingError instead of a KeyError if no primary key was found. - delete() now also works based on the primary key if no oid available and returns whether the row existed or not. Version 3.8.1 (2006-06-05) -------------------------- - Use string methods instead of deprecated string functions. - Only use SQL-standard way of escaping quotes. - Added the functions escape_string() and escape/unescape_bytea() (as suggested by Charlie Dyson and Kavous Bojnourdi a long time ago). - Reverted code in clear() method that set date to current. - Added code for backwards compatibility in OID munging code. - Reorder attnames tests so that "interval" is checked for before "int." - If caller supplies key dictionary, make sure that all has a namespace. 
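
For example, the escaping functions mentioned above can be used as
follows (a sketch using the module-level functions as they exist in
current PyGreSQL; the exact bytea escape format may vary)::

    import pg

    print(pg.escape_string("O'Reilly"))  # SQL-standard quoting: O''Reilly
    print(pg.escape_bytea(b'\x00\x42'))  # escaped for use in a bytea literal
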
Version 3.8 (2006-02-17) ------------------------ - Installed new favicon.ico from Matthew Sporleder - Replaced snprintf by PyOS_snprintf - Removed NO_SNPRINTF switch which is not needed any longer - Clean up some variable names and namespace - Add get_relations() method to get any type of relation - Rewrite get_tables() to use get_relations() - Use new method in get_attnames method to get attributes of views as well - Add Binary type - Number of rows is now -1 after executing no-result statements - Fix some number handling - Non-simple types do not raise an error any more - Improvements to documentation framework - Take into account that nowadays not every table must have an oid column - Simplification and improvement of the inserttable() function - Fix up unit tests - The usual assortment of minor fixes and enhancements Version 3.7 (2005-09-07) ------------------------ Improvement of pgdb module: - Use Python standard `datetime` if `mxDateTime` is not available Major improvements and clean-up in classic pg module: - All members of the underlying connection directly available in `DB` - Fixes to quoting function - Add checks for valid database connection to methods - Improved namespace support, handle `search_path` correctly - Removed old dust and unnecessary imports, added docstrings - Internal sql statements as one-liners, smoothed out ugly code Version 3.6.2 (2005-02-23) -------------------------- - Further fixes to namespace handling Version 3.6.1 (2005-01-11) -------------------------- - Fixes to namespace handling Version 3.6 (2004-12-17) ------------------------ - Better DB-API 2.0 compliance - Exception hierarchy moved into C module and made available to both APIs - Fix error in update method that caused false exceptions - Moved to standard exception hierarchy in classic API - Added new method to get transaction state - Use proper Python constants where appropriate - Use Python versions of strtol, etc. Allows Win32 build. - Bug fixes and cleanups Version 3.5 (2004-08-29) ------------------------ Fixes and enhancements: - Add interval to list of data types - fix up method wrapping especially close() - retry pkeys once if table missing in case it was just added - wrap query method separately to handle debug better - use isinstance instead of type - fix free/PQfreemem issue - finally - miscellaneous cleanups and formatting Version 3.4 (2004-06-02) ------------------------ Some cleanups and fixes. This is the first version where PyGreSQL is moved back out of the PostgreSQL tree. A lot of the changes mentioned below were actually made while in the PostgreSQL tree since their last release. - Allow for larger integer returns - Return proper strings for true and false - Cleanup convenience method creation - Enhance debugging method - Add reopen method - Allow programs to preload field names for speedup - Move OID handling so that it returns long instead of int - Miscellaneous cleanups and formatting Version 3.3 (2001-12-03) ------------------------ A few cleanups. Mostly there was some confusion about the latest version and so I am bumping the number to keep it straight. - Added NUMERICOID to list of returned types. This fixes a bug when returning aggregates in the latest version of PostgreSQL. Version 3.2 (2001-06-20) ------------------------ Note that there are very few changes to PyGreSQL between 3.1 and 3.2. The main reason for the release is the move into the PostgreSQL development tree. Even the WIN32 changes are pretty minor. 
- Add Win32 support (gerhard@bigfoot.de) - Fix some DB-API quoting problems (niall.smart@ebeon.com) - Moved development into PostgreSQL development tree. Version 3.1 (2000-11-06) ------------------------ - Fix some quoting functions. In particular handle NULLs better. - Use a method to add primary key information rather than direct manipulation of the class structures - Break decimal out in `_quote` (in pg.py) and treat it as float - Treat timestamp like date for quoting purposes - Remove a redundant SELECT from the `get` method speeding it, and `insert` (since it calls `get`) up a little. - Add test for BOOL type in typecast method to `pgdbTypeCache` class (tv@beamnet.de) - Fix pgdb.py to send port as integer to lower level function (dildog@l0pht.com) - Change pg.py to speed up some operations - Allow updates on tables with no primary keys Version 3.0 (2000-05-30) ------------------------ - Remove strlen() call from pglarge_write() and get size from object (Richard@Bouska.cz) - Add a little more error checking to the quote function in the wrapper - Add extra checking in `_quote` function - Wrap query in pg.py for debugging - Add DB-API 2.0 support to pgmodule.c (andre@via.ecp.fr) - Add DB-API 2.0 wrapper pgdb.py (andre@via.ecp.fr) - Correct keyword clash (temp) in tutorial - Clean up layout of tutorial - Return NULL values as None (rlawrence@lastfoot.com) (WARNING: This will cause backwards compatibility issues) - Change None to NULL in insert and update - Change hash-bang lines to use /usr/bin/env - Clearing date should be blank (NULL) not TODAY - Quote backslashes in strings in `_quote` (brian@CSUA.Berkeley.EDU) - Expanded and clarified build instructions (tbryan@starship.python.net) - Make code thread safe (Jerome.Alet@unice.fr) - Add README.distutils (mwa@gate.net & jeremy@cnri.reston.va.us) - Many fixes and increased DB-API compliance by chifungfan@yahoo.com, tony@printra.net, jeremy@alum.mit.edu and others to get the final version ready to release. Version 2.4 (1999-06-15) ------------------------ - Insert returns None if the user doesn't have select permissions on the table. It can (and does) happen that one has insert but not select permissions on a table. 
- Added ntuples() method to query object (brit@druid.net) - Corrected a bug related to getresult() and the money type - Corrected a bug related to negative money amounts - Allow update based on primary key if munged oid not available and table has a primary key - Add many __doc__ strings (andre@via.ecp.fr) - Get method works with views if key specified Version 2.3 (1999-04-17) ------------------------ - connect.host returns "localhost" when connected to Unix socket (torppa@tuhnu.cutery.fi) - Use `PyArg_ParseTupleAndKeywords` in connect() (torppa@tuhnu.cutery.fi) - fixes and cleanups (torppa@tuhnu.cutery.fi) - Fixed memory leak in dictresult() (terekhov@emc.com) - Deprecated pgext.py - functionality now in pg.py - More cleanups to the tutorial - Added fileno() method - terekhov@emc.com (Mikhail Terekhov) - added money type to quoting function - Compiles cleanly with more warnings turned on - Returns PostgreSQL error message on error - Init accepts keywords (Jarkko Torppa) - Convenience functions can be overridden (Jarkko Torppa) - added close() method Version 2.2 (1998-12-21) ------------------------ - Added user and password support thanks to Ng Pheng Siong (ngps@post1.com) - Insert queries return the inserted oid - Add new `pg` wrapper (C module renamed to _pg) - Wrapped database connection in a class - Cleaned up some of the tutorial. (More work needed.) - Added `version` and `__version__`. Thanks to thilo@eevolute.com for the suggestion. Version 2.1 (1998-03-07) ------------------------ - return fields as proper Python objects for field type - Cleaned up pgext.py - Added dictresult method Version 2.0 (1997-12-23) ------------------------ - Updated code for PostgreSQL 6.2.1 and Python 1.5 - Reformatted code and converted to use full ANSI style prototypes - Changed name to PyGreSQL (from PyGres95) - Changed order of arguments to connect function - Created new type `pgqueryobject` and moved certain methods to it - Added a print function for pgqueryobject - Various code changes - mostly stylistic Version 1.0b (1995-11-04) ------------------------- - Keyword support for connect function moved from library file to C code and taken away from library - Rewrote documentation - Bug fix in connect function - Enhancements in large objects interface methods Version 1.0a (1995-10-30) ------------------------- A limited release. - Module adapted to standard Python syntax - Keyword support for connect function in library file - Rewrote default parameters interface (internal use of strings) - Fixed minor bugs in module interface - Redefinition of error messages Version 0.9b (1995-10-10) ------------------------- The first public release. - Large objects implementation - Many bug fixes, enhancements, ... Version 0.1a (1995-10-07) ------------------------- - Basic libpq functions (SQL access) PyGreSQL-PyGreSQL-166b135/docs/contents/examples.rst000066400000000000000000000012121450706350600221340ustar00rootroot00000000000000Examples ======== I am starting to collect examples of applications that use PyGreSQL. So far I only have a few but if you have an example for me, you can either send me the files or the URL for me to point to. The :doc:`postgres/index` that is part of the PyGreSQL distribution shows some examples of using PostgreSQL with PyGreSQL. Here is a `list of motorcycle rides in Ontario `_ that uses a PostgreSQL database to store the rides. There is a link at the bottom of the page to view the source code. 
Oleg Broytmann has written a simple example `RGB database demo `_ PyGreSQL-PyGreSQL-166b135/docs/contents/general.rst000066400000000000000000000036771450706350600217540ustar00rootroot00000000000000General PyGreSQL programming information ---------------------------------------- PyGreSQL consists of two parts: the "classic" PyGreSQL interface provided by the :mod:`pg` module and the newer DB-API 2.0 compliant interface provided by the :mod:`pgdb` module. If you use only the standard features of the DB-API 2.0 interface, it will be easier to switch from PostgreSQL to another database for which a DB-API 2.0 compliant interface exists. The "classic" interface may be easier to use for beginners, and it provides some higher-level and PostgreSQL specific convenience methods. .. seealso:: **DB-API 2.0** (Python Database API Specification v2.0) is a specification for connecting to databases (not only PostgreSQL) from Python that has been developed by the Python DB-SIG in 1999. The authoritative programming information for the DB-API is :pep:`0249`. Both Python modules utilize the same low-level C extension, which serves as a wrapper for the "libpq" library, the C API to PostgreSQL. This means you must have the libpq library installed as a shared library on your client computer, in a version that is supported by PyGreSQL. Depending on the client platform, you may have to set environment variables like `PATH` or `LD_LIBRARY_PATH` so that PyGreSQL can find the library. .. warning:: Note that PyGreSQL is not thread-safe on the connection level. Therefore we recommend using `DBUtils `_ for multi-threaded environments, which supports both PyGreSQL interfaces. Another option is using PyGreSQL indirectly as a database driver for the high-level `SQLAlchemy `_ SQL toolkit and ORM, which supports PyGreSQL starting with SQLAlchemy 1.1 and which provides a way to use PyGreSQL in a multi-threaded environment using the concept of "thread local storage". Database URLs for PyGreSQL take this form:: postgresql+pygresql://username:password@host:port/database PyGreSQL-PyGreSQL-166b135/docs/contents/index.rst000066400000000000000000000010531450706350600214300ustar00rootroot00000000000000The PyGreSQL documentation ========================== Contents -------- .. toctree:: :maxdepth: 1 Installing PyGreSQL What's New and History of Changes General PyGreSQL Programming Information First Steps with PyGreSQL The Classic PyGreSQL Interface The DB-API Compliant Interface A PostgreSQL Primer Examples for using PyGreSQL Indices and tables ------------------ * :ref:`genindex` * :ref:`modindex` * :ref:`search` PyGreSQL-PyGreSQL-166b135/docs/contents/install.rst000066400000000000000000000174421450706350600220000ustar00rootroot00000000000000Installation ============ General ------- You must first install Python and PostgreSQL on your system. If you want to access remote databases only, you don't need to install the full PostgreSQL server, but only the libpq C-interface library. On Windows, this library is called ``libpq.dll`` and is for instance contained in the PostgreSQL ODBC driver (search for "psqlodbc"). On Linux, it is called ``libpq.so`` and usually provided in a package called "libpq" or "libpq5". On Windows, you also need to make sure that the directory that contains ``libpq.dll`` is part of your ``PATH`` environment variable. The current version of PyGreSQL has been tested with Python versions 3.7 to 3.12, and PostgreSQL versions 10 to 16. 
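If you are not sure whether the libpq shared library can be found on your system, you can probe for it from Python using only the standard library. This is just a quick diagnostic sketch, not part of PyGreSQL; on Windows you would look for ``libpq`` instead and also check your ``PATH``::

    import ctypes.util

    # prints e.g. 'libpq.so.5' on Linux, or None if the library is not found
    print(ctypes.util.find_library('pq'))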
PyGreSQL will be installed as two packages named ``pg`` (for the classic interface) and ``pgdb`` (for the DB API 2 compliant interface). The former also contains a shared library called ``_pg.so`` (on Linux) or a DLL called ``_pg.pyd`` (on Windows) and a stub file ``_pg.pyi`` for this library. Installing with Pip ------------------- This is the easiest way to install PyGreSQL if you have "pip" installed. Just run the following command in your terminal:: pip install PyGreSQL This will automatically try to find and download a distribution on the `Python Package Index `_ that matches your operating system and Python version and install it. Note that you still need to have the libpq interface installed on your system (see the general remarks above). Installing from a Binary Distribution ------------------------------------- If you don't want to use "pip", or "pip" doesn't find an appropriate distribution for your computer, you can also try to manually download and install a distribution. When you download the source distribution, you will need to compile the C extension, for which you need a C compiler installed. If you don't want to install a C compiler or want to avoid possible problems with the compilation, you can search for a pre-compiled binary distribution of PyGreSQL on the Python Package Index or the PyGreSQL homepage. You can currently download PyGreSQL as Linux RPM, NetBSD package and Windows installer. Make sure the required Python version of the binary package matches the Python version you have installed. Install the package as usual on your system. Note that the documentation is currently only included in the source package. Installing from Source ---------------------- If you want to install PyGreSQL from source, or there is no binary package available for your platform, follow these instructions. Make sure the Python header files and PostgreSQL client and server header files are installed. These usually come with the "devel" packages on Unix systems and the installer executables on Windows systems. If you are using a precompiled PostgreSQL, you will also need the pg_config tool. This is usually also part of the "devel" package on Unix, and will be installed as part of the database server feature on Windows systems. Building and installing with Distutils ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ You can build and install PyGreSQL using `Distutils `_. Download and unpack the PyGreSQL source tarball if you haven't already done so. Type the following commands to build and install PyGreSQL:: python setup.py install Now you should be ready to use PyGreSQL. You can also run the build step separately if you want to create a distribution to be installed on a different system or explicitly enable or disable certain features. For instance, in order to build PyGreSQL without support for the memory size functions, run:: python setup.py build_ext --no-memory-size By default, PyGreSQL is compiled with support for all features available in the installed PostgreSQL version, and you will get warnings for the features that are not supported in this version. You can also explicitly require a feature in order to get an error if it is not available, for instance:: python setup.py build_ext --memory-size You can find out all possible build options with:: python setup.py build_ext --help Alternatively, you can also use the corresponding C preprocessor macros like ``MEMORY_SIZE`` directly (see the next section).
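Once the extension has been built, you can check at runtime which libpq version it was actually linked against, which helps when diagnosing feature-related warnings or errors. A minimal sketch, assuming your PyGreSQL version already provides the :func:`get_pqlib_version` function::

    import pg

    # a number encoding the version of the libpq library in use
    print(pg.get_pqlib_version())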
Note that if you build PyGreSQL with support for newer features that are not available in the PQLib installed on the runtime system, you may get an error when importing PyGreSQL, since these features are missing in the shared library which will prevent Python from loading it. Compiling Manually ~~~~~~~~~~~~~~~~~~ The source file for compiling the C extension module is pgmodule.c. You have two options. You can compile PyGreSQL as a stand-alone module or you can build it into the Python interpreter. Stand-Alone ^^^^^^^^^^^ * In the directory containing ``pgmodule.c``, run the following command:: cc -fpic -shared -o _pg.so -I$PYINC -I$PGINC -I$PSINC -L$PGLIB -lpq pgmodule.c where you have to set:: PYINC = path to the Python include files (usually something like /usr/include/python) PGINC = path to the PostgreSQL client include files (something like /usr/include/pgsql or /usr/include/postgresql) PSINC = path to the PostgreSQL server include files (like /usr/include/pgsql/server or /usr/include/postgresql/server) PGLIB = path to the PostgreSQL object code libraries (usually /usr/lib) If you are not sure about the above paths, try something like:: PYINC=`find /usr -name Python.h` PGINC=`find /usr -name libpq-fe.h` PSINC=`find /usr -name postgres.h` PGLIB=`find /usr -name libpq.so` If you have the ``pg_config`` tool installed, you can set:: PGINC=`pg_config --includedir` PSINC=`pg_config --includedir-server` PGLIB=`pg_config --libdir` Some options may be added to this line:: -DMEMORY_SIZE = support memory size function (PostgreSQL 12 or newer) On some systems you may need to include ``-lcrypt`` in the list of libraries to make it compile. * Test the new module. Something like the following should work:: $ python >>> import _pg >>> db = _pg.connect('thilo','localhost') >>> db.query("INSERT INTO test VALUES ('ping','pong')") 18304 >>> db.query("SELECT * FROM test") eins|zwei ----+---- ping|pong (1 row) * Finally, move the ``_pg.so``, ``pg.py``, and ``pgdb.py`` to a directory in your ``PYTHONPATH``. A good place would be ``/usr/lib/python/site-packages`` if your Python modules are in ``/usr/lib/python``. Built-in to Python interpreter ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ * Find the directory where your ``Setup`` file lives (usually in the ``Modules`` subdirectory) in the Python source hierarchy and copy or symlink the ``pgmodule.c`` file there. * Add the following line to your 'Setup' file:: _pg pgmodule.c -I$PGINC -I$PSINC -L$PGLIB -lpq where:: PGINC = path to the PostgreSQL client include files (see above) PSINC = path to the PostgreSQL server include files (see above) PGLIB = path to the PostgreSQL object code libraries (see above) Some options may be added to this line:: -DMEMORY_SIZE = support memory size function (PostgreSQL 12 or newer) On some systems you may need to include ``-lcrypt`` in the list of libraries to make it compile. * If you want a shared module, make sure that the ``shared`` keyword is uncommented and add the above line below it. You used to need to install your shared modules with ``make sharedinstall`` but this no longer seems to be true. * Copy ``pg.py`` to the lib directory where the rest of your modules are. For example, that's ``/usr/local/lib/Python`` on my system. * Rebuild Python from the root directory of the Python source hierarchy by running ``make -f Makefile.pre.in boot`` and ``make && make install``. * For more details read the documentation at the top of ``Makefile.pre.in``. 
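Whichever installation method you chose, a quick smoke test confirms that both the Python modules and the libpq library can be found. This sketch assumes a local database named ``test`` that your user is allowed to connect to::

    import pg

    con = pg.connect('test')  # connect to the local 'test' database
    # ask the server for its version string via a trivial query
    print(con.query("select version()").getresult()[0][0])
    con.close()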
PyGreSQL-PyGreSQL-166b135/docs/contents/pg/000077500000000000000000000000001450706350600201765ustar00rootroot00000000000000PyGreSQL-PyGreSQL-166b135/docs/contents/pg/adaptation.rst000066400000000000000000000413461450706350600230640ustar00rootroot00000000000000Remarks on Adaptation and Typecasting ===================================== .. currentmodule:: pg Both PostgreSQL and Python have the concept of data types, but there are of course differences between the two type systems. Therefore PyGreSQL needs to adapt Python objects to the representation required by PostgreSQL when passing values as query parameters, and it needs to typecast the representation of PostgreSQL data types returned by database queries to Python objects. Here are some explanations about how this works in detail in case you want to better understand or change the default behavior of PyGreSQL. Supported data types -------------------- The following automatic data type conversions are supported by PyGreSQL out of the box. If you need other automatic type conversions or want to change the default conversions, you can achieve this by using the methods explained in the next two sections. ================================== ================== PostgreSQL Python ================================== ================== char, bpchar, name, text, varchar str bool bool bytea bytes int2, int4, int8, oid, serial int int2vector list of int float4, float8 float numeric, money Decimal date datetime.date time, timetz datetime.time timestamp, timestamptz datetime.datetime interval datetime.timedelta hstore dict json, jsonb list or dict uuid uuid.UUID array list [#array]_ record tuple ================================== ================== .. note:: Elements of arrays and records will also be converted accordingly. .. [#array] The first element of the array will always be the first element of the Python list, no matter what the lower bound of the PostgreSQL array is. The information about the start index of the array (which is usually 1 in PostgreSQL, but can also be different from 1) is ignored and gets lost in the conversion to the Python list. If you need that information, you can request it separately with the `array_lower()` function provided by PostgreSQL. Adaptation of parameters ------------------------ When you use the higher level methods of the classic :mod:`pg` module like :meth:`DB.insert()` or :meth:`DB.update()`, you don't need to care about adaptation of parameters, since all of this is happening automatically behind the scenes. You only need to consider this issue when creating SQL commands manually and sending them to the database using the :meth:`DB.query` method. Imagine you have created a user login form that stores the login name as *login* and the password as *passwd* and you now want to get the user data for that user. You may be tempted to execute a query like this:: >>> db = pg.DB(...) >>> sql = "SELECT * FROM user_table WHERE login = '%s' AND passwd = '%s'" >>> db.query(sql % (login, passwd)).getresult()[0] This seems to work at a first glance, but you will notice an error as soon as you try to use a login name containing a single quote. Even worse, this error can be exploited through so-called "SQL injection", where an attacker inserts malicious SQL statements into the query that you never intended to be executed. For instance, with a login name something like ``' OR ''='`` the attacker could easily log in and see the user data of another user in the database. 
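To see why, it helps to look at the SQL string that the ``%`` operator actually produces for such an input. The following sketch only builds the string, it does not execute anything::

    login = passwd = "' OR ''='"
    sql = "SELECT * FROM user_table WHERE login = '%s' AND passwd = '%s'"
    print(sql % (login, passwd))
    # SELECT * FROM user_table WHERE login = '' OR ''='' AND passwd = '' OR ''=''

Since ``AND`` binds more tightly than ``OR``, the trailing ``''=''`` comparison is always true, so this condition matches every row of the table.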
One solution for this problem would be to cleanse your input of "dangerous" characters like the single quote, but this is tedious and it is likely that you overlook something or break the application e.g. for users with names like "D'Arcy". A better solution is to use the escaping functions provided by PostgreSQL which are available as methods on the :class:`DB` object:: >>> login = "D'Arcy" >>> db.escape_string(login) "D''Arcy" As you see, :meth:`DB.escape_string` has doubled the single quote which is the right thing to do in SQL. However, there are better ways of passing parameters to the query, without having to manually escape them. If you pass the parameters as positional arguments to :meth:`DB.query`, then PyGreSQL will send them to the database separately, without the need for quoting them inside the SQL command, and without the problems inherent with that process. In this case you must put placeholders of the form ``$1``, ``$2`` etc. in the SQL command in place of the parameters that should go there. For instance:: >>> sql = "SELECT * FROM user_table WHERE login = $1 AND passwd = $2" >>> db.query(sql, login, passwd).getresult()[0] That's much better. So please always keep the following warning in mind: .. warning:: Remember to **never** insert parameters directly into your queries using the ``%`` operator. Always pass the parameters separately. If you like the ``%`` format specifications of Python better than the placeholders used by PostgreSQL, there is still a way to use them, via the :meth:`DB.query_formatted` method:: >>> sql = "SELECT * FROM user_table WHERE login = %s AND passwd = %s" >>> db.query_formatted(sql, (login, passwd)).getresult()[0] Note that we need to pass the parameters not as positional arguments here, but as a single tuple. Also note again that we did not use the ``%`` operator of Python to format the SQL string, we just used the ``%s`` format specifications of Python and let PyGreSQL care about the formatting. Even better, you can also pass the parameters as a dictionary if you use the :meth:`DB.query_formatted` method:: >>> sql = """SELECT * FROM user_table ... WHERE login = %(login)s AND passwd = %(passwd)s""" >>> parameters = dict(login=login, passwd=passwd) >>> db.query_formatted(sql, parameters).getresult()[0] Here is another example:: >>> sql = "SELECT 'Hello, ' || %s || '!'" >>> db.query_formatted(sql, (login,)).getresult()[0] You would think that the following even simpler example should work, too: >>> sql = "SELECT %s" >>> db.query_formatted(sql, (login,)).getresult()[0] ProgrammingError: Could not determine data type of parameter $1 The issue here is that :meth:`DB.query_formatted` by default still uses PostgreSQL parameters, transforming the Python style ``%s`` placeholder into a ``$1`` placeholder, and sending the login name separately from the query. In the query we looked at before, the concatenation with other strings made it clear that it should be interpreted as a string. This simple query however does not give PostgreSQL a clue what data type the ``$1`` placeholder stands for. This is different when you are embedding the login name directly into the query instead of passing it as parameter to PostgreSQL. 
You can achieve this by setting the *inline* parameter of :meth:`DB.query_formatted`, like so:: >>> sql = "SELECT %s" >>> db.query_formatted(sql, (login,), inline=True).getresult()[0] Another way of making this query work while still sending the parameters separately is to simply cast the parameter values:: >>> sql = "SELECT %s::text" >>> db.query_formatted(sql, (login,), inline=False).getresult()[0] In real world examples you will rarely have to cast your parameters like that, since in an INSERT statement or a WHERE clause comparing the parameter to a table column, the data type will be clear from the context. When binding the parameters to a query, PyGreSQL not only adapts the basic types like ``int``, ``float``, ``bool`` and ``str``, but also tries to make sense of Python lists and tuples. Lists are adapted as PostgreSQL arrays:: >>> params = dict(array=[[1, 2],[3, 4]]) >>> db.query_formatted("SELECT %(array)s::int[]", params).getresult()[0][0] [[1, 2], [3, 4]] Note that again we need to cast the array parameter or use inline parameters only because this simple query does not provide enough context. Also note that the query gives the value back as Python lists again. This is achieved by the typecasting mechanism explained in the next section. Tuples are adapted as PostgreSQL composite types. If you use inline parameters, they can also be used with the ``IN`` syntax. Let's think of a more real world example again where we create a table with a composite type in PostgreSQL: .. code-block:: sql CREATE TABLE on_hand ( item inventory_item, count integer) We assume the composite type ``inventory_item`` has been created like this: .. code-block:: sql CREATE TYPE inventory_item AS ( name text, supplier_id integer, price numeric) In Python we can use a named tuple as an equivalent to this PostgreSQL type:: >>> from collections import namedtuple >>> inventory_item = namedtuple( ... 'inventory_item', ['name', 'supplier_id', 'price']) Using the automatic adaptation of Python tuples, an item can now be inserted into the database and then read back as follows:: >>> db.query_formatted("INSERT INTO on_hand VALUES (%(item)s, %(count)s)", ... dict(item=inventory_item('fuzzy dice', 42, 1.99), count=1000)) >>> db.query("SELECT * FROM on_hand").getresult()[0][0] Row(item=inventory_item(name='fuzzy dice', supplier_id=42, price=Decimal('1.99')), count=1000) The :meth:`DB.insert` method provides a simpler way to achieve the same:: >>> row = dict(item=inventory_item('fuzzy dice', 42, 1.99), count=1000) >>> db.insert('on_hand', row) {'count': 1000, 'item': inventory_item(name='fuzzy dice', supplier_id=42, price=Decimal('1.99'))} Perhaps we want to use custom Python classes instead of named tuples to hold our values:: >>> class InventoryItem: ... ... def __init__(self, name, supplier_id, price): ... self.name = name ... self.supplier_id = supplier_id ... self.price = price ... ... def __str__(self): ... return '{} (from {}, at ${})'.format( ... self.name, self.supplier_id, self.price) But when we try to insert an instance of this class in the same way, we will get an error. This is because PyGreSQL tries to pass the string representation of the object as a parameter to PostgreSQL, but this is just a human readable string and not useful for PostgreSQL to build a composite type. However, it is possible to make such custom classes adapt themselves to PostgreSQL by adding a "magic" method with the name ``__pg_str__``, like so:: >>> class InventoryItem: ... ... ... ... ... def __str__(self): ... 
return '{} (from {}, at ${})'.format( ... self.name, self.supplier_id, self.price) ... ... def __pg_str__(self, typ): ... return (self.name, self.supplier_id, self.price) Now you can insert class instances the same way as you insert named tuples. You can even make these objects adapt to different types in different ways:: >>> class InventoryItem: ... ... ... ... ... def __pg_str__(self, typ): ... if typ == 'text': ... return str(self) ... return (self.name, self.supplier_id, self.price) ... >>> db.query("ALTER TABLE on_hand ADD COLUMN remark varchar") >>> item=InventoryItem('fuzzy dice', 42, 1.99) >>> row = dict(item=item, remark=item, count=1000) >>> db.insert('on_hand', row) {'count': 1000, 'item': inventory_item(name='fuzzy dice', supplier_id=42, price=Decimal('1.99')), 'remark': 'fuzzy dice (from 42, at $1.99)'} There is also another "magic" method ``__pg_repr__`` which does not take the *typ* parameter. That method is used instead of ``__pg_str__`` when passing parameters inline. You must be more careful when using ``__pg_repr__``, because it must return a properly escaped string that can be put literally inside the SQL. The only exception is when you return a tuple or list, because these will be adapted and properly escaped by PyGreSQL again. Typecasting to Python --------------------- As you noticed, PyGreSQL automatically converted the PostgreSQL data to suitable Python objects when returning values via the :meth:`DB.get()`, :meth:`Query.getresult()` and similar methods. This is done by the use of built-in typecast functions. If you want to use different typecast functions or add your own if no built-in typecast function is available, then this is possible using the :func:`set_typecast` function. With the :func:`get_typecast` function you can check which function is currently set. If no typecast function is set, then PyGreSQL will return the raw strings from the database. For instance, you will find that PyGreSQL uses the normal ``int`` function to cast PostgreSQL ``int4`` type values to Python:: >>> pg.get_typecast('int4') int In the classic PyGreSQL module, the typecasting for these basic types is always done internally by the C extension module for performance reasons. We can set a different typecast function for ``int4``, but it will not become effective, the C module continues to use its internal typecasting. However, we can add new typecast functions for the database types that are not supported by the C module. For example, we can create a typecast function that casts items of the composite PostgreSQL type used as example in the previous section to instances of the corresponding Python class. To do this, at first we get the default typecast function that PyGreSQL has created for the current :class:`DB` connection. This default function casts composite types to named tuples, as we have seen in the section before. 
We can grab it from the :attr:`DB.dbtypes` object as follows:: >>> cast_tuple = db.dbtypes.get_typecast('inventory_item') Now we can create a new typecast function that converts the tuple to an instance of our custom class:: >>> cast_item = lambda value: InventoryItem(*cast_tuple(value)) Finally, we set this typecast function, either globally with :func:`set_typecast`, or locally for the current connection like this:: >>> db.dbtypes.set_typecast('inventory_item', cast_item) Now we can get instances of our custom class directly from the database:: >>> item = db.query("SELECT * FROM on_hand").getresult()[0][0] >>> str(item) 'fuzzy dice (from 42, at $1.99)' Note that some of the typecast functions used by the C module are configurable with separate module level functions, such as :meth:`set_decimal`, :meth:`set_bool` or :meth:`set_jsondecode`. You need to use these instead of :meth:`set_typecast` if you want to change the behavior of the C module. Also note that after changing global typecast functions with :meth:`set_typecast`, you may need to run ``db.dbtypes.reset_typecast()`` to make these changes effective on connections that were already open. As one last example, let us try to typecast the geometric data type ``circle`` of PostgreSQL into a `SymPy `_ ``Circle`` object. Let's assume we have created and populated a table with two circles, like so: .. code-block:: sql CREATE TABLE circle ( name varchar(8) primary key, circle circle); INSERT INTO circle VALUES ('C1', '<(2, 3), 3>'); INSERT INTO circle VALUES ('C2', '<(1, -1), 4>'); With PostgreSQL we can easily calculate that these two circles overlap:: >>> q = db.query("""SELECT c1.circle && c2.circle ... FROM circle c1, circle c2 ... WHERE c1.name = 'C1' AND c2.name = 'C2'""") >>> q.getresult()[0][0] True However, calculating the intersection points between the two circles using the ``#`` operator does not work (at least not as of PostgreSQL version 14). So let's resort to SymPy to find out. To ease importing circles from PostgreSQL to SymPy, we create and register the following typecast function:: >>> from sympy import Point, Circle >>> >>> def cast_circle(s): ... p, r = s[1:-1].split(',') ... p = p[1:-1].split(',') ... return Circle(Point(float(p[0]), float(p[1])), float(r)) ... >>> pg.set_typecast('circle', cast_circle) Now we can import the circles in the table into Python simply using:: >>> circle = db.get_as_dict('circle', scalar=True) The result is a dictionary mapping circle names to SymPy ``Circle`` objects. We can verify that the circles have been imported correctly: >>> circle['C1'] Circle(Point(2, 3), 3.0) >>> circle['C2'] Circle(Point(1, -1), 4.0) Finally we can find the exact intersection points with SymPy: >>> circle['C1'].intersection(circle['C2']) [Point(29/17 + 64564173230121*sqrt(17)/100000000000000, -80705216537651*sqrt(17)/500000000000000 + 31/17), Point(-64564173230121*sqrt(17)/100000000000000 + 29/17, 80705216537651*sqrt(17)/500000000000000 + 31/17)] PyGreSQL-PyGreSQL-166b135/docs/contents/pg/connection.rst000066400000000000000000000651131450706350600230750ustar00rootroot00000000000000Connection -- The connection object =================================== .. currentmodule:: pg .. class:: Connection This object handles a connection to a PostgreSQL database. It embeds and hides all the parameters that define this connection, thus just leaving really significant parameters in function calls. .. note:: Some methods give direct access to the connection socket. 
*Do not use them unless you really know what you are doing.* Some other methods give access to large objects. Refer to the PostgreSQL user manual for more information about these. query -- execute a SQL command string ------------------------------------- .. method:: Connection.query(command, [args]) Execute a SQL command string :param str command: SQL command :param args: optional parameter values :returns: result values :rtype: :class:`Query`, None :raises TypeError: bad argument type, or too many arguments :raises TypeError: invalid connection :raises ValueError: empty SQL query or lost connection :raises pg.ProgrammingError: error in query :raises pg.InternalError: error during query processing This method simply sends a SQL query to the database. If the query is an insert statement that inserted exactly one row into a table that has OIDs, the return value is the OID of the newly inserted row as an integer. If the query is an update or delete statement, or an insert statement that did not insert exactly one row, or on a table without OIDs, then the number of rows affected is returned as a string. If it is a statement that returns rows as a result (usually a select statement, but maybe also an ``"insert/update ... returning"`` statement), this method returns a :class:`Query`. Otherwise, it returns ``None``. You can use the :class:`Query` object as an iterator that yields all results as tuples, or call :meth:`Query.getresult` to get the result as a list of tuples. Alternatively, you can call :meth:`Query.dictresult` or :meth:`Query.dictiter` if you want to get the rows as dictionaries, or :meth:`Query.namedresult` or :meth:`Query.namediter` if you want to get the rows as named tuples. You can also simply print the :class:`Query` object to show the query results on the console. The SQL command may optionally contain positional parameters of the form ``$1``, ``$2``, etc instead of literal data, in which case the values must be supplied separately as a tuple. The values are substituted by the database in such a way that they don't need to be escaped, making this an effective way to pass arbitrary or unknown data without worrying about SQL injection or syntax errors. If you don't pass any parameters, the command string can also include multiple SQL commands (separated by semicolons). You will only get the return value for the last command in this case. When the database could not process the query, a :exc:`pg.ProgrammingError` or a :exc:`pg.InternalError` is raised. You can check the ``SQLSTATE`` error code of this error by reading its :attr:`sqlstate` attribute. Example:: name = input("Name? ") phone = con.query("select phone from employees where name=$1", (name,)).getresult() send_query - executes a SQL command string asynchronously --------------------------------------------------------- .. method:: Connection.send_query(command, [args]) Submits a command to the server without waiting for the result(s). :param str command: SQL command :param args: optional parameter values :returns: a query object, as described below :rtype: :class:`Query` :raises TypeError: bad argument type, or too many arguments :raises TypeError: invalid connection :raises ValueError: empty SQL query or lost connection :raises pg.ProgrammingError: error in query This method is much the same as :meth:`Connection.query`, except that it returns without waiting for the query to complete. 
The database connection cannot be used for other operations until the query completes, but the application can do other things, including executing queries using other database connections. The application can call ``select()`` using the ``fileno`` obtained by the connection's :meth:`Connection.fileno` method to determine when the query has results to return. This method always returns a :class:`Query` object. This object differs from the :class:`Query` object returned by :meth:`Connection.query` in a few ways. Most importantly, when :meth:`Connection.send_query` is used, the application must call one of the result-returning methods such as :meth:`Query.getresult` or :meth:`Query.dictresult` until it either raises an exception or returns ``None``. Otherwise, the database connection will be left in an unusable state. In cases when :meth:`Connection.query` would return something other than a :class:`Query` object, that result will be returned by calling one of the result-returning methods on the :class:`Query` object returned by :meth:`Connection.send_query`. There's one important difference in these result codes: if :meth:`Connection.query` returns `None`, the result-returning methods will return an empty string (`''`). It's still necessary to call a result-returning method until it returns `None`. :meth:`Query.listfields`, :meth:`Query.fieldname` and :meth:`Query.fieldnum` only work after a call to a result-returning method with a non-``None`` return value. Calling ``len()`` on a :class:`Query` object returns the number of rows of the previous result-returning method. If multiple semicolon-delimited statements are passed to :meth:`Connection.query`, only the results of the last statement are returned in the :class:`Query` object. With :meth:`Connection.send_query`, all results are returned. Each result set will be returned by a separate call to :meth:`Query.getresult()` or other result-returning methods. .. versionadded:: 5.2 Examples:: name = input("Name? ") query = con.send_query("select phone from employees where name=$1", (name,)) phone = query.getresult() query.getresult() # to close the query # Run two queries in one round trip: # (Note that you cannot use a union here # when the result sets have different row types.) query = con.send_query("select a,b,c from x where d=e; " "select e,f from y where g") result_x = query.dictresult() result_y = query.dictresult() query.dictresult() # to close the query # Using select() to wait for the query to be ready: query = con.send_query("select pg_sleep(20)") r, w, e = select([con.fileno(), other, sockets], [], []) if con.fileno() in r: results = query.getresult() query.getresult() # to close the query # Concurrent queries on separate connections: con1 = connect() con2 = connect() s = con1.query("begin; set transaction isolation level repeatable read;" "select pg_export_snapshot();").singlescalar() con2.query("begin; set transaction isolation level repeatable read;" f"set transaction snapshot '{s}'") q1 = con1.send_query("select a,b,c from x where d=e") q2 = con2.send_query("select e,f from y where g") r1 = q1.getresult() q1.getresult() r2 = q2.getresult() q2.getresult() con1.query("commit") con2.query("commit") query_prepared -- execute a prepared statement ---------------------------------------------- ..
method:: Connection.query_prepared(name, [args]) Execute a prepared statement :param str name: name of the prepared statement :param args: optional parameter values :returns: result values :rtype: :class:`Query`, None :raises TypeError: bad argument type, or too many arguments :raises TypeError: invalid connection :raises ValueError: empty SQL query or lost connection :raises pg.ProgrammingError: error in query :raises pg.InternalError: error during query processing :raises pg.OperationalError: prepared statement does not exist This method works exactly like :meth:`Connection.query` except that instead of passing the command itself, you pass the name of a prepared statement. An empty name corresponds to the unnamed statement. You must have previously created the corresponding named or unnamed statement with :meth:`Connection.prepare`, or an :exc:`pg.OperationalError` will be raised. .. versionadded:: 5.1 prepare -- create a prepared statement -------------------------------------- .. method:: Connection.prepare(name, command) Create a prepared statement :param str name: name of the prepared statement :param str command: SQL command :rtype: None :raises TypeError: bad argument types, or wrong number of arguments :raises TypeError: invalid connection :raises pg.ProgrammingError: error in query or duplicate query This method creates a prepared statement with the specified name for the given command for later execution with the :meth:`Connection.query_prepared` method. The name can be empty to create an unnamed statement, in which case any pre-existing unnamed statement is automatically replaced; otherwise a :exc:`pg.ProgrammingError` is raised if the statement name is already defined in the current database session. The SQL command may optionally contain positional parameters of the form ``$1``, ``$2``, etc instead of literal data. The corresponding values must then later be passed to the :meth:`Connection.query_prepared` method separately as a tuple. .. versionadded:: 5.1 describe_prepared -- describe a prepared statement -------------------------------------------------- .. method:: Connection.describe_prepared(name) Describe a prepared statement :param str name: name of the prepared statement :rtype: :class:`Query` :raises TypeError: bad argument type, or too many arguments :raises TypeError: invalid connection :raises pg.OperationalError: prepared statement does not exist This method returns a :class:`Query` object describing the prepared statement with the given name. You can also pass an empty name in order to describe the unnamed statement. Information on the fields of the corresponding query can be obtained through the :meth:`Query.listfields`, :meth:`Query.fieldname` and :meth:`Query.fieldnum` methods. .. versionadded:: 5.1 reset -- reset the connection ----------------------------- .. method:: Connection.reset() Reset the :mod:`pg` connection :rtype: None :raises TypeError: too many (any) arguments :raises TypeError: invalid connection This method resets the current database connection. poll - completes an asynchronous connection ------------------------------------------- .. method:: Connection.poll() Complete an asynchronous :mod:`pg` connection and get its state :returns: state of the connection :rtype: int :raises TypeError: too many (any) arguments :raises TypeError: invalid connection :raises pg.InternalError: some error occurred during pg connection The database connection can be performed without any blocking calls. 
This allows the application mainline to perform other operations or perhaps connect to multiple databases concurrently. Once the connection is established, it's no different from a connection made using blocking calls. The required steps are to pass the parameter ``nowait=True`` to the :meth:`pg.connect` call, then call :meth:`Connection.poll` until it either returns :const:`POLLING_OK` or raises an exception. To avoid blocking in :meth:`Connection.poll`, use `select()` or `poll()` to wait for the connection to be readable or writable, depending on the return code of the previous call to :meth:`Connection.poll`. The initial state of the connection is :const:`POLLING_WRITING`. The possible states are defined as constants in the :mod:`pg` module (:const:`POLLING_OK`, :const:`POLLING_FAILED`, :const:`POLLING_READING` and :const:`POLLING_WRITING`). .. versionadded:: 5.2 Example:: con = pg.connect('testdb', nowait=True) fileno = con.fileno() rd = [] wt = [fileno] rc = pg.POLLING_WRITING while rc not in (pg.POLLING_OK, pg.POLLING_FAILED): ra, wa, xa = select(rd, wt, [], timeout) if not ra and not wa: timedout() rc = con.poll() if rc == pg.POLLING_READING: rd = [fileno] wt = [] else: rd = [] wt = [fileno] cancel -- abandon processing of current SQL command --------------------------------------------------- .. method:: Connection.cancel() :rtype: None :raises TypeError: too many (any) arguments :raises TypeError: invalid connection This method requests that the server abandon processing of the current SQL command. close -- close the database connection -------------------------------------- .. method:: Connection.close() Close the :mod:`pg` connection :rtype: None :raises TypeError: too many (any) arguments This method closes the database connection. The connection will be closed in any case when the connection is deleted but this allows you to explicitly close it. It is mainly here to allow the DB-SIG API wrapper to implement a close function. transaction -- get the current transaction state ------------------------------------------------ .. method:: Connection.transaction() Get the current in-transaction status of the server :returns: the current in-transaction status :rtype: int :raises TypeError: too many (any) arguments :raises TypeError: invalid connection The status returned by this method can be :const:`TRANS_IDLE` (currently idle), :const:`TRANS_ACTIVE` (a command is in progress), :const:`TRANS_INTRANS` (idle, in a valid transaction block), or :const:`TRANS_INERROR` (idle, in a failed transaction block). :const:`TRANS_UNKNOWN` is reported if the connection is bad. The status :const:`TRANS_ACTIVE` is reported only when a query has been sent to the server and not yet completed. parameter -- get a current server parameter setting --------------------------------------------------- .. method:: Connection.parameter(name) Look up a current parameter setting of the server :param str name: the name of the parameter to look up :returns: the current setting of the specified parameter :rtype: str or None :raises TypeError: too many (any) arguments :raises TypeError: invalid connection Certain parameter values are reported by the server automatically at connection startup or whenever their values change. This method can be used to interrogate these settings. It returns the current value of a parameter if known, or *None* if the parameter is not known. 
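For example, assuming an open connection ``con``, you can inspect a few of the reported settings like this::

    for name in ('server_version', 'server_encoding', 'DateStyle'):
        print(name, '=', con.parameter(name))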
You can use this method to check the settings of important parameters such as `server_version`, `server_encoding`, `client_encoding`, `application_name`, `is_superuser`, `session_authorization`, `DateStyle`, `IntervalStyle`, `TimeZone`, `integer_datetimes`, and `standard_conforming_strings`. Values that are not reported by this method can be requested using :meth:`DB.get_parameter`. .. versionadded:: 4.0 date_format -- get the currently used date format ------------------------------------------------- .. method:: Connection.date_format() Look up the date format currently being used by the database :returns: the current date format :rtype: str :raises TypeError: too many (any) arguments :raises TypeError: invalid connection This method returns the current date format used by the server. Note that it is cheap to call this method, since there is no database query involved and the setting is also cached internally. You will need the date format when you want to manually typecast dates and timestamps coming from the database instead of using the built-in typecast functions. The date format returned by this method can be directly used with date formatting functions such as :meth:`datetime.strptime`. It is derived from the current setting of the database parameter ``DateStyle``. .. versionadded:: 5.0 fileno -- get the socket used to connect to the database -------------------------------------------------------- .. method:: Connection.fileno() Get the socket used to connect to the database :returns: the socket id of the database connection :rtype: int :raises TypeError: too many (any) arguments :raises TypeError: invalid connection This method returns the underlying socket id used to connect to the database. This is useful in select calls, etc. set_non_blocking - set the non-blocking status of the connection ---------------------------------------------------------------- .. method:: set_non_blocking(nb) Set the non-blocking mode of the connection :param bool nb: True to put the connection into non-blocking mode. False to put it into blocking mode. :raises TypeError: too many parameters :raises TypeError: invalid connection Puts the socket connection into non-blocking mode or into blocking mode. This affects copy commands and large object operations, but not queries. .. versionadded:: 5.2 is_non_blocking - report the blocking status of the connection -------------------------------------------------------------- .. method:: is_non_blocking() Get the non-blocking mode of the connection :returns: True if the connection is in non-blocking mode. False if it is in blocking mode. :rtype: bool :raises TypeError: too many parameters :raises TypeError: invalid connection Returns True if the connection is in non-blocking mode, False otherwise. .. versionadded:: 5.2 getnotify -- get the last notify from the server ------------------------------------------------ .. method:: Connection.getnotify() Get the last notify from the server :returns: last notify from server :rtype: tuple, None :raises TypeError: too many parameters :raises TypeError: invalid connection This method tries to get a notify from the server (from the SQL statement NOTIFY). If the server returns no notify, the method returns None. Otherwise, it returns a tuple (triplet) *(relname, pid, extra)*, where *relname* is the name of the notify, *pid* is the process id of the connection that triggered the notify, and *extra* is a payload string that has been sent with the notification.
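A minimal sketch of polling for notifications on a channel named ``ping``, assuming an open connection ``con`` and some other session issuing ``NOTIFY ping, 'hello'``::

    con.query("listen ping")  # register interest in the 'ping' channel
    # ... later, e.g. after select() reported the connection socket as readable:
    note = con.getnotify()
    if note is not None:
        relname, pid, extra = note
        print(relname, pid, extra)  # e.g.: ping 4711 hello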
Remember to do a listen query first, otherwise :meth:`Connection.getnotify` will always return ``None``. .. versionchanged:: 4.1 Support for payload strings was added in version 4.1. inserttable -- insert an iterable into a table ---------------------------------------------- .. method:: Connection.inserttable(table, values, [columns]) Insert a Python iterable into a database table :param str table: the table name :param list values: iterable of row values, which must be lists or tuples :param list columns: list or tuple of column names :rtype: int :raises TypeError: invalid connection, bad argument type, or too many arguments :raises MemoryError: insert buffer could not be allocated :raises ValueError: unsupported values This method allows you to *quickly* insert large blocks of data in a table. Internally, it uses the COPY command of the PostgreSQL database. The method takes an iterable of row values which must be tuples or lists of the same size, containing the values for each inserted row. These may contain string, integer, long or double (real) values. ``columns`` is an optional tuple or list of column names to be passed on to the COPY command. The number of rows affected is returned. .. warning:: This method doesn't type check the fields according to the table definition; it just checks whether or not it knows how to handle such types. get/set_cast_hook -- fallback typecast function ----------------------------------------------- .. method:: Connection.get_cast_hook() Get the function that handles all external typecasting :returns: the current external typecast function :rtype: callable, None :raises TypeError: too many (any) arguments This returns the callback function used by PyGreSQL to provide plug-in Python typecast functions for the connection. .. versionadded:: 5.0 .. method:: Connection.set_cast_hook(func) Set a function that will handle all external typecasting :param func: the function to be used as a callback :rtype: None :raises TypeError: the specified cast hook function is not callable This method allows setting a custom fallback function for providing Python typecast functions for the connection to supplement the C extension module. If you set this function to *None*, then only the typecast functions implemented in the C extension module are enabled. You normally would not want to change this. Instead, you can use :func:`get_typecast` and :func:`set_typecast` to add or change the plug-in Python typecast functions. .. versionadded:: 5.0 get/set_notice_receiver -- custom notice receiver ------------------------------------------------- .. method:: Connection.get_notice_receiver() Get the current notice receiver :returns: the current notice receiver callable :rtype: callable, None :raises TypeError: too many (any) arguments This method gets the custom notice receiver callback function that has been set with :meth:`Connection.set_notice_receiver`, or ``None`` if no custom notice receiver has ever been set on the connection. .. versionadded:: 4.1 .. method:: Connection.set_notice_receiver(func) Set a custom notice receiver :param func: the custom notice receiver callback function :rtype: None :raises TypeError: the specified notice receiver is not callable This method allows setting a custom notice receiver callback function. When a notice or warning message is received from the server, or generated internally by libpq, and the message level is below the one set with ``client_min_messages``, the specified notice receiver function will be called.
This function must take one parameter, the :class:`Notice` object, which provides the following read-only attributes: .. attribute:: Notice.pgcnx the connection .. attribute:: Notice.message the full message with a trailing newline .. attribute:: Notice.severity the level of the message, e.g. 'NOTICE' or 'WARNING' .. attribute:: Notice.primary the primary human-readable error message .. attribute:: Notice.detail an optional secondary error message .. attribute:: Notice.hint an optional suggestion on what to do about the problem .. versionadded:: 4.1 putline -- write a line to the server socket -------------------------------------------- .. method:: Connection.putline(line) Write a line to the server socket :param str line: line to be written :rtype: None :raises TypeError: invalid connection, bad parameter type, or too many parameters This method allows you to write a string directly to the server socket. getline -- get a line from server socket ---------------------------------------- .. method:: Connection.getline() Get a line from server socket :returns: the line read :rtype: str :raises TypeError: invalid connection :raises TypeError: too many parameters :raises MemoryError: buffer overflow This method allows you to read a string directly from the server socket. endcopy -- synchronize client and server ---------------------------------------- .. method:: Connection.endcopy() Synchronize client and server :rtype: None :raises TypeError: invalid connection :raises TypeError: too many parameters The use of direct access methods may desynchronize client and server. This method ensures that client and server will be synchronized. locreate -- create a large object in the database ------------------------------------------------- .. method:: Connection.locreate(mode) Create a large object in the database :param int mode: large object create mode :returns: object handling the PostgreSQL large object :rtype: :class:`LargeObject` :raises TypeError: invalid connection, bad parameter type, or too many parameters :raises pg.OperationalError: creation error This method creates a large object in the database. The mode can be defined by OR-ing the constants defined in the :mod:`pg` module (:const:`INV_READ` and :const:`INV_WRITE`). Please refer to the PostgreSQL user manual for a description of the mode values. getlo -- build a large object from given oid -------------------------------------------- .. method:: Connection.getlo(oid) Build a large object from the given OID :param int oid: OID of the existing large object :returns: object handling the PostgreSQL large object :rtype: :class:`LargeObject` :raises TypeError: invalid connection, bad parameter type, or too many parameters :raises ValueError: bad OID value (0 is invalid_oid) This method allows reusing a previously created large object through the :class:`LargeObject` interface, provided the user has its OID. loimport -- import a file to a large object ------------------------------------------- .. method:: Connection.loimport(name) Import a file to a large object :param str name: the name of the file to be imported :returns: object handling the PostgreSQL large object :rtype: :class:`LargeObject` :raises TypeError: invalid connection, bad argument type, or too many arguments :raises pg.OperationalError: error during file import This method allows you to create large objects in a very simple way. You just give the name of a file containing the data to be used.
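The following sketch ties the large object methods together: it imports a file, remembers the OID, and later reads the data back through a new :class:`LargeObject` handle. It assumes an open connection ``con`` and a readable file ``data.bin``; see the chapter on the :class:`LargeObject` class for the methods used on ``lo``::

    lo = con.loimport('data.bin')  # import the file as a new large object
    oid = lo.oid                   # remember its OID for later

    lo = con.getlo(oid)            # build a handle for the same object
    lo.open(pg.INV_READ)
    data = lo.read(lo.size())      # read back the complete content
    lo.close()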
Object attributes ----------------- Every :class:`Connection` defines a set of read-only attributes that describe the connection and its status. These attributes are: .. attribute:: Connection.host the host name of the server (str) .. attribute:: Connection.port the port of the server (int) .. attribute:: Connection.db the selected database (str) .. attribute:: Connection.options the connection options (str) .. attribute:: Connection.user user name on the database system (str) .. attribute:: Connection.protocol_version the frontend/backend protocol being used (int) .. versionadded:: 4.0 .. attribute:: Connection.server_version the backend version (int, e.g. 150400 for 15.4) .. versionadded:: 4.0 .. attribute:: Connection.status the status of the connection (int: 1 = OK, 0 = bad) .. attribute:: Connection.error the last warning/error message from the server (str) .. attribute:: Connection.socket the file descriptor number of the connection socket to the server (int) .. versionadded:: 5.1 .. attribute:: Connection.backend_pid the PID of the backend process handling this connection (int) .. versionadded:: 5.1 .. attribute:: Connection.ssl_in_use this is True if the connection uses SSL, False if not .. versionadded:: 5.1 .. attribute:: Connection.ssl_attributes SSL-related information about the connection (dict) .. versionadded:: 5.1 PyGreSQL-PyGreSQL-166b135/docs/contents/pg/db_types.rst000066400000000000000000000075671450706350600225600ustar00rootroot00000000000000DbTypes -- The internal cache for database types ================================================ .. currentmodule:: pg .. class:: DbTypes .. versionadded:: 5.0 The :class:`DbTypes` object is essentially a dictionary mapping PostgreSQL internal type names and type OIDs to PyGreSQL "type names" (which are also returned by :meth:`DB.get_attnames` as dictionary values). These type names are strings which are equal to either the simple PyGreSQL names or to the more fine-grained registered PostgreSQL type names if these have been enabled with :meth:`DB.use_regtypes`. Type names are strings that are augmented with additional information about the associated PostgreSQL type that can be inspected using the following attributes: - *oid* -- the PostgreSQL type OID - *pgtype* -- the internal PostgreSQL data type name - *regtype* -- the registered PostgreSQL data type name - *simple* -- the more coarse-grained PyGreSQL type name - *typlen* -- internal size of the type, negative if variable - *typtype* -- `b` = base type, `c` = composite type etc. - *category* -- `A` = Array, `b` =Boolean, `C` = Composite etc. - *delim* -- delimiter for array types - *relid* -- corresponding table for composite types - *attnames* -- attributes for composite types For details, see the PostgreSQL documentation on `pg_type `_. In addition to the dictionary methods, the :class:`DbTypes` class also provides the following methods: .. method:: DbTypes.get_attnames(typ) Get the names and types of the fields of composite types :param typ: PostgreSQL type name or OID of a composite type :type typ: str or int :returns: an ordered dictionary mapping field names to type names .. method:: DbTypes.get_typecast(typ) Get the cast function for the given database type :param str typ: PostgreSQL type name :returns: the typecast function for the specified type :rtype: function or None .. 
.. method:: DbTypes.set_typecast(typ, cast)

    Set a typecast function for the given database type(s)

    :param typ: PostgreSQL type name or list of type names
    :type typ: str or list
    :param cast: the typecast function to be set for the specified type(s)
    :type cast: function or None

The typecast function must take one string object as argument and return a
Python object into which the PostgreSQL type shall be cast. If the function
takes another parameter named *connection*, then the current database
connection will also be passed to the typecast function. This may sometimes
be necessary to look up certain database settings.

.. method:: DbTypes.reset_typecast([typ])

    Reset the typecasts for the specified (or all) type(s) to their defaults

    :param typ: PostgreSQL type name or list of type names, or None to
        reset all typecast functions
    :type typ: str, list or None

.. method:: DbTypes.typecast(value, typ)

    Cast the given value according to the given database type

    :param value: the value that is to be cast
    :param str typ: PostgreSQL type name or type code
    :returns: the cast value

.. note::

    Note that the :class:`DbTypes` object is always bound to a database
    connection. You can also get, set and reset typecast functions on a
    global level using the functions :func:`pg.get_typecast` and
    :func:`pg.set_typecast`. If you do this, the current database
    connections will continue to use their already cached typecast
    functions unless you reset the typecast functions by calling the
    :meth:`DbTypes.reset_typecast` method on :attr:`DB.dbtypes` objects
    of the running connections.

    Also note that the typecasting for all of the basic types happens
    already in the C low-level extension module. The typecast functions
    that can be set with the above methods are only called for the types
    that are not already supported by the C extension.

The DB wrapper class
====================

.. currentmodule:: pg

.. class:: DB

The :class:`Connection` methods are wrapped in the class :class:`DB`
which also adds convenient higher level methods for working with the
database. It also serves as a context manager for the connection.
The preferred way to use this module is as follows::

    import pg

    with pg.DB(...) as db:  # for parameters, see below
        for r in db.query(  # just for example
                "SELECT foo, bar FROM foo_bar_table WHERE foo !~ bar"
                ).dictresult():
            print('{foo} {bar}'.format(**r))

This class can be subclassed as in this example::

    import pg

    class DB_ride(pg.DB):
        """Ride database wrapper

        This class encapsulates the database functions and the specific
        methods for the ride database."""

        def __init__(self):
            """Open a database connection to the rides database"""
            pg.DB.__init__(self, dbname='ride')
            self.query("SET DATESTYLE TO 'ISO'")

        [Add or override methods here]

The following describes the methods and variables of this class.

Initialization
--------------

The :class:`DB` class is initialized with the same arguments as the
:func:`connect` function described above. It also initializes a few
internal variables. The statement ``db = DB()`` will open the local
database with the name of the user just like ``connect()`` does.

You can also initialize the DB class with an existing :mod:`pg` or
:mod:`pgdb` connection. Pass this connection as a single unnamed parameter,
or as a single parameter named ``db``. This allows you to use all of the
methods of the DB class with a DB-API 2 compliant connection.
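For example, wrapping an existing DB-API 2 connection from the :mod:`pgdb`
module might look like this (a sketch, assuming a database named
``testdb``)::

    import pg
    import pgdb

    con = pgdb.connect(database='testdb')  # DB-API 2 connection
    db = pg.DB(con)                        # wrap it in the DB class
    print(db.get_tables())                 # use classic interface methods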
Note that the :meth:`DB.close` and :meth:`DB.reopen` methods are inoperative
in this case.

pkey -- return the primary key of a table
-----------------------------------------

.. method:: DB.pkey(table, [composite])

    Return the primary key of a table

    :param str table: name of table
    :param bool composite: whether a composite key shall always be returned
    :returns: Name of the field that is the primary key of the table
    :rtype: str
    :raises KeyError: the table does not have a primary key

This method returns the primary key of a table. Single primary keys are
returned as strings unless you set the composite flag. Composite primary
keys are always represented as tuples. Note that this raises a KeyError
if the table does not have a primary key.

pkeys -- return the primary keys of a table
-------------------------------------------

.. method:: DB.pkeys(table)

    Return the primary keys of a table as a tuple

    :param str table: name of table
    :returns: Names of the fields that are the primary keys of the table
    :rtype: tuple
    :raises KeyError: the table does not have a primary key

This method returns the primary keys of a table as a tuple, i.e. single
primary keys are also returned as a tuple with one item. Note that this
raises a KeyError if the table does not have a primary key.

.. versionadded:: 6.0

get_databases -- get list of databases in the system
----------------------------------------------------

.. method:: DB.get_databases()

    Get the list of databases in the system

    :returns: all databases in the system
    :rtype: list

Although you can do this with a simple select, it is added here for
convenience.

get_relations -- get list of relations in connected database
-------------------------------------------------------------

.. method:: DB.get_relations([kinds], [system])

    Get the list of relations in connected database

    :param str kinds: a string or sequence of type letters
    :param bool system: whether system relations should be returned
    :returns: all relations of the given kinds in the database
    :rtype: list

This method returns the list of relations in the connected database.
Although you can do this with a simple select, it is added here for
convenience. You can select which kinds of relations you are interested
in by passing type letters in the `kinds` parameter. The type letters are
``r`` = ordinary table, ``i`` = index, ``S`` = sequence, ``v`` = view,
``c`` = composite type, ``s`` = special, ``t`` = TOAST table. If `kinds`
is None or an empty string, all relations are returned (this is also the
default). If `system` is set to `True`, then system tables and views
(temporary tables, toast tables, catalog views and tables) will be
returned as well, otherwise they will be ignored.

get_tables -- get list of tables in connected database
------------------------------------------------------

.. method:: DB.get_tables([system])

    Get the list of tables in connected database

    :param bool system: whether system tables should be returned
    :returns: all tables in connected database
    :rtype: list

This is a shortcut for ``get_relations('r', system)`` that has been added
for convenience.

get_attnames -- get the attribute names of a table
--------------------------------------------------

.. method:: DB.get_attnames(table)

    Get the attribute names of a table

    :param str table: name of table
    :returns: an ordered dictionary mapping attribute names to type names

Given the name of a table, digs out the set of attribute names.
Returns a read-only dictionary of attribute names (the names are the keys,
the values are the names of the attributes' types) with the column names
in the proper order if you iterate over it.
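For example, for a hypothetical table created with
``CREATE TABLE fruits (id serial PRIMARY KEY, name varchar)``,
this might return::

    db.get_attnames('fruits')  # might return {'id': 'int', 'name': 'text'}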
By default, only a limited number of simple types will be returned.
You can get the registered types instead, if enabled by calling the
:meth:`DB.use_regtypes` method.

get_generated -- get the generated columns of a table
-----------------------------------------------------

.. method:: DB.get_generated(table)

    Get the generated columns of a table

    :param str table: name of table
    :returns: a frozenset of column names

Given the name of a table, digs out the set of generated columns.

.. versionadded:: 5.2.5

has_table_privilege -- check table privilege
--------------------------------------------

.. method:: DB.has_table_privilege(table, privilege)

    Check whether current user has specified table privilege

    :param str table: the name of the table
    :param str privilege: privilege to be checked -- default is 'select'
    :returns: whether current user has specified table privilege
    :rtype: bool

Returns True if the current user has the specified privilege for the table.

.. versionadded:: 4.0

get/set_parameter -- get or set run-time parameters
----------------------------------------------------

.. method:: DB.get_parameter(parameter)

    Get the value of run-time parameters

    :param parameter: the run-time parameter(s) to get
    :type parameter: str, tuple, list or dict
    :returns: the current value(s) of the run-time parameter(s)
    :rtype: str, list or dict
    :raises TypeError: Invalid parameter type(s)
    :raises pg.ProgrammingError: Invalid parameter name(s)

If the parameter is a string, the return value will also be a string that
is the current setting of the run-time parameter with that name.

You can get several parameters at once by passing a list, set or dict.
When passing a list of parameter names, the return value will be a
corresponding list of parameter settings. When passing a set of parameter
names, a new dict will be returned, mapping these parameter names to their
settings. Finally, if you pass a dict as parameter, its values will be set
to the current parameter settings corresponding to its keys.

By passing the special name ``'all'`` as the parameter, you can get a dict
of all existing configuration parameters.

Note that you can request most of the important parameters also using
:meth:`Connection.parameter()` which does not involve a database query,
unlike :meth:`DB.get_parameter` and :meth:`DB.set_parameter`.

.. versionadded:: 4.2

.. method:: DB.set_parameter(parameter, [value], [local])

    Set the value of run-time parameters

    :param parameter: the run-time parameter(s) to set
    :type parameter: string, tuple, list or dict
    :param value: the value to set
    :type value: str or None
    :raises TypeError: Invalid parameter type(s)
    :raises ValueError: Invalid value argument(s)
    :raises pg.ProgrammingError: Invalid parameter name(s) or values

If the parameter and the value are strings, the run-time parameter
will be set to that value. If no value or *None* is passed as a value,
then the run-time parameter will be restored to its default value.

You can set several parameters at once by passing a list of parameter
names, together with a single value that all parameters should be
set to or with a corresponding list of values. You can also pass the
parameters as a set if you only provide a single value. Finally, you can
pass a dict with parameter names as keys. In this case, you should not
pass a value, since the values for the parameters will be taken from
the dict.

By passing the special name ``'all'`` as the parameter, you can reset all
existing settable run-time parameters to their default values.
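A simple sketch of getting and setting a run-time parameter (the values
shown are just for illustration)::

    db.set_parameter('datestyle', 'ISO, YMD')
    db.get_parameter('datestyle')  # -> 'ISO, YMD'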
If you set *local* to `True`, then the command takes effect for only the
current transaction. After :meth:`DB.commit` or :meth:`DB.rollback`,
the session-level setting takes effect again. Setting *local* to `True`
will appear to have no effect if it is executed outside a transaction,
since the transaction will end immediately.

.. versionadded:: 4.2

begin/commit/rollback/savepoint/release -- transaction handling
---------------------------------------------------------------

.. method:: DB.begin([mode])

    Begin a transaction

    :param str mode: an optional transaction mode such as 'READ ONLY'

    This initiates a transaction block, that is, all following queries
    will be executed in a single transaction until :meth:`DB.commit`
    or :meth:`DB.rollback` is called.

.. versionadded:: 4.1

.. method:: DB.start()

    This is the same as the :meth:`DB.begin` method.

.. method:: DB.commit()

    Commit a transaction

    This commits the current transaction.

.. method:: DB.end()

    This is the same as the :meth:`DB.commit` method.

.. versionadded:: 4.1

.. method:: DB.rollback([name])

    Roll back a transaction

    :param str name: optionally, roll back to the specified savepoint

    This rolls back the current transaction, discarding all its changes.

.. method:: DB.abort()

    This is the same as the :meth:`DB.rollback` method.

.. versionadded:: 4.2

.. method:: DB.savepoint(name)

    Define a new savepoint

    :param str name: the name to give to the new savepoint

    This establishes a new savepoint within the current transaction.

.. versionadded:: 4.1

.. method:: DB.release(name)

    Destroy a savepoint

    :param str name: the name of the savepoint to destroy

    This destroys a savepoint previously defined in the current transaction.

.. versionadded:: 4.1

get -- get a row from a database table or view
----------------------------------------------

.. method:: DB.get(table, row, [keyname])

    Get a row from a database table or view

    :param str table: name of table or view
    :param row: either a dictionary or the value to be looked up
    :param str keyname: name of field to use as key (optional)
    :returns: A dictionary - the keys are the attribute names,
        the values are the row values.
    :raises pg.ProgrammingError: table has no primary key or missing privilege
    :raises KeyError: missing key value for the row

This method is the basic mechanism to get a single row. It assumes that
the *keyname* specifies a unique row. It must be the name of a single
column or a tuple of column names. If *keyname* is not specified, then
the primary key for the table is used.

If *row* is a dictionary, then the value for the key is taken from it.
Otherwise, the row must be a single value or a tuple of values
corresponding to the passed *keyname* or primary key. The fetched row
from the table will be returned as a new dictionary or used to replace
the existing values if the row was passed as a dictionary.

The OID is also put into the dictionary if the table has one, but in
order to allow the caller to work with multiple tables, it is munged as
``oid(table)`` using the actual name of the table.

Note that since PyGreSQL 5.0 this will return the value of an array
type column as a Python list by default.
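For example, fetching a single row from a hypothetical ``employees`` table
might look like this (assuming ``ein`` is its primary key)::

    emp = db.get('employees', 1234, 'ein')    # look up the row with ein = 1234
    emp = db.get('employees', {'ein': 1234})  # the same, passing a dictionary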
insert -- insert a row into a database table
--------------------------------------------

.. method:: DB.insert(table, [row], [col=val, ...])

    Insert a row into a database table

    :param str table: name of table
    :param dict row: optional dictionary of values
    :param col: optional keyword arguments for updating the dictionary
    :returns: the inserted values in the database
    :rtype: dict
    :raises pg.ProgrammingError: missing privilege or conflict

This method inserts a row into a table. If the optional dictionary is
not supplied then the required values must be included as keyword/value
pairs. If a dictionary is supplied then any keywords provided will be
added to or replace the entry in the dictionary.

The dictionary is then reloaded with the values actually inserted in order
to pick up values modified by rules, triggers, etc.

Note that since PyGreSQL 5.0 it is possible to insert a value for an
array type column by passing it as a Python list.

update -- update a row in a database table
------------------------------------------

.. method:: DB.update(table, [row], [col=val, ...])

    Update a row in a database table

    :param str table: name of table
    :param dict row: optional dictionary of values
    :param col: optional keyword arguments for updating the dictionary
    :returns: the new row in the database
    :rtype: dict
    :raises pg.ProgrammingError: table has no primary key or missing privilege
    :raises KeyError: missing key value for the row

Similar to insert, but updates an existing row. The update is based on
the primary key of the table or the OID value as munged by :meth:`DB.get`
or passed as keyword. The OID will take precedence if provided, so that it
is possible to update the primary key itself.

The dictionary is then modified to reflect any changes caused by the
update due to triggers, rules, default values, etc. Like insert, the
dictionary is optional and updates will be performed on the fields in
the keywords. There must be an OID or primary key either specified using
the ``'oid'`` keyword or in the dictionary, in which case the OID must
be munged.

upsert -- insert a row with conflict resolution
-----------------------------------------------

.. method:: DB.upsert(table, [row], [col=val, ...])

    Insert a row into a database table with conflict resolution

    :param str table: name of table
    :param dict row: optional dictionary of values
    :param col: optional keyword arguments for specifying the update
    :returns: the new row in the database
    :rtype: dict
    :raises pg.ProgrammingError: table has no primary key or missing privilege

This method inserts a row into a table, but instead of raising a
ProgrammingError exception in case of violating a constraint or unique
index, an update will be executed instead. This will be performed as a
single atomic operation on the database, so race conditions can be avoided.

Like the insert method, the first parameter is the name of the table and
the second parameter can be used to pass the values to be inserted as a
dictionary.

Unlike the insert and update statements, keyword parameters are not used
to modify the dictionary, but to specify which columns shall be updated
in case of a conflict, and in which way:

A value of `False` or `None` means the column shall not be updated,
a value of `True` means the column shall be updated with the value that
has been proposed for insertion, i.e. has been passed as value in the
dictionary. Columns that are not specified by keywords but appear as keys
in the dictionary are also updated as if those keywords had been passed
with the value `True`.

So if in the case of a conflict you want to update every column that has
been passed in the dictionary `d`, you would call ``upsert(table, d)``.
If you don't want to do anything in case of a conflict, i.e. leave the
existing row as it is, call ``upsert(table, d, **dict.fromkeys(d))``.

If you need more fine-grained control of what gets updated, you can also
pass strings in the keyword parameters. These strings will be used as SQL
expressions for the update columns. In these expressions you can refer
to the value that already exists in the table by writing the table prefix
``included.`` before the column name, and you can refer to the value that
has been proposed for insertion by writing ``excluded.`` as table prefix.

The dictionary is modified in any case to reflect the values in the
database after the operation has completed.
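As an illustration, assuming a hypothetical ``inventory`` table with
``name`` as its primary key and a ``quantity`` column::

    row = {'name': 'limes', 'quantity': 50}
    db.upsert('inventory', row)  # insert, or update quantity on conflict
    db.upsert('inventory', row, quantity=False)  # keep existing quantity
    db.upsert('inventory', row,  # add the new quantity to the existing one
        quantity='included.quantity + excluded.quantity')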
.. note::

    The method uses the PostgreSQL "upsert" feature which is only available
    since PostgreSQL 9.5. With older PostgreSQL versions, you will get a
    ProgrammingError if you use this method.

.. versionadded:: 5.0

query -- execute a SQL command string
-------------------------------------

.. method:: DB.query(command, [arg1, [arg2, ...]])

    Execute a SQL command string

    :param str command: SQL command
    :param arg*: optional positional arguments
    :returns: result values
    :rtype: :class:`Query`, None
    :raises TypeError: bad argument type, or too many arguments
    :raises TypeError: invalid connection
    :raises ValueError: empty SQL query or lost connection
    :raises pg.ProgrammingError: error in query
    :raises pg.InternalError: error during query processing

Similar to the :class:`Connection` function with the same name, except that
positional arguments can be passed either as a single list or tuple, or as
individual positional arguments. These arguments will then be used as
parameter values of parameterized queries.

Example::

    name = input("Name? ")
    phone = input("Phone? ")
    num_rows = db.query("update employees set phone=$2 where name=$1",
        name, phone)
    # or
    num_rows = db.query("update employees set phone=$2 where name=$1",
        (name, phone))

query_formatted -- execute a formatted SQL command string
---------------------------------------------------------

.. method:: DB.query_formatted(command, [parameters], [types], [inline])

    Execute a formatted SQL command string

    :param str command: SQL command
    :param parameters: the values of the parameters for the SQL command
    :type parameters: tuple, list or dict
    :param types: optionally, the types of the parameters
    :type types: tuple, list or dict
    :param bool inline: whether the parameters should be passed in the SQL
    :rtype: :class:`Query`, None
    :raises TypeError: bad argument type, or too many arguments
    :raises TypeError: invalid connection
    :raises ValueError: empty SQL query or lost connection
    :raises pg.ProgrammingError: error in query
    :raises pg.InternalError: error during query processing

Similar to :meth:`DB.query`, but using Python format placeholders of the
form ``%s`` or ``%(names)s`` instead of PostgreSQL placeholders of the
form ``$1``. The parameters must be passed as a tuple, list or dict.
You can also pass a corresponding tuple, list or dict of database types
in order to format the parameters properly in case there is ambiguity.

If you set *inline* to True, the parameters will be sent to the database
embedded in the SQL command, otherwise they will be sent separately.

If you set *inline* to True or don't pass any parameters, the command
string can also include multiple SQL commands (separated by semicolons).
You will only get the result for the last command in this case.

Note that the adaptation and conversion of the parameters causes a certain
performance overhead. Depending on the type of values, the overhead can be
smaller for *inline* queries or if you pass the types of the parameters,
so that they don't need to be guessed from the values. For best
performance, we recommend using a raw :meth:`DB.query` or
:meth:`DB.query_prepared` if you are executing many of the same operations
with different parameters.

Example::

    name = input("Name? ")
    phone = input("Phone? ")
    num_rows = db.query_formatted(
        "update employees set phone=%s where name=%s",
        (phone, name))
    # or
    num_rows = db.query_formatted(
        "update employees set phone=%(phone)s where name=%(name)s",
        dict(name=name, phone=phone))

Example with specification of types::

    db.query_formatted(
        "update orders set info=%s where id=%s",
        ({'customer': 'Joe', 'product': 'beer'}, 7),
        types=('json', 'int'))
    # or
    db.query_formatted(
        "update orders set info=%s where id=%s",
        ({'customer': 'Joe', 'product': 'beer'}, 7),
        types='json int')
    # or
    db.query_formatted(
        "update orders set info=%(info)s where id=%(id)s",
        {'info': {'customer': 'Joe', 'product': 'beer'}, 'id': 7},
        types={'info': 'json', 'id': 'int'})

query_prepared -- execute a prepared statement
----------------------------------------------

.. method:: DB.query_prepared(name, [arg1, [arg2, ...]])

    Execute a prepared statement

    :param str name: name of the prepared statement
    :param arg*: optional positional arguments
    :returns: result values
    :rtype: :class:`Query`, None
    :raises TypeError: bad argument type, or too many arguments
    :raises TypeError: invalid connection
    :raises ValueError: empty SQL query or lost connection
    :raises pg.ProgrammingError: error in query
    :raises pg.InternalError: error during query processing
    :raises pg.OperationalError: prepared statement does not exist

This method works like the :meth:`DB.query` method, except that instead of
passing the SQL command, you pass the name of a prepared statement created
previously using the :meth:`DB.prepare` method.

Passing an empty string or *None* as the name will execute the unnamed
statement (see warning about the limited lifetime of the unnamed statement
in :meth:`DB.prepare`).

The functionality of this method is equivalent to that of the SQL
``EXECUTE`` command. Note that calling EXECUTE would require parameters
to be sent inline, and be properly sanitized (escaped, quoted).

.. versionadded:: 5.1

prepare -- create a prepared statement
--------------------------------------

.. method:: DB.prepare(name, command)

    Create a prepared statement

    :param str command: SQL command
    :param str name: name of the prepared statement
    :rtype: None
    :raises TypeError: bad argument types, or wrong number of arguments
    :raises TypeError: invalid connection
    :raises pg.ProgrammingError: error in query or duplicate query

This method creates a prepared statement with the specified name for later
execution of the given command with the :meth:`DB.query_prepared` method.

If the name is empty or *None*, the unnamed prepared statement is used,
in which case any pre-existing unnamed statement is replaced.

Otherwise, if a prepared statement with the specified name is already
defined in the current database session, a :exc:`pg.ProgrammingError`
is raised.

The SQL command may optionally contain positional parameters of the form
``$1``, ``$2``, etc. instead of literal data.
The corresponding values must then be passed to the
:meth:`DB.query_prepared` method as positional arguments.

The functionality of this method is equivalent to that of the SQL
``PREPARE`` command.

Example::

    db.prepare('change phone',
        "update employees set phone=$2 where ein=$1")
    while True:
        ein = input("Employee ID? ")
        if not ein:
            break
        phone = input("Phone? ")
        db.query_prepared('change phone', ein, phone)

.. note::

    We recommend always using named queries, since unnamed queries have a
    limited lifetime and can be automatically replaced or destroyed by
    various operations on the database.

.. versionadded:: 5.1

describe_prepared -- describe a prepared statement
--------------------------------------------------

.. method:: DB.describe_prepared([name])

    Describe a prepared statement

    :param str name: name of the prepared statement
    :rtype: :class:`Query`
    :raises TypeError: bad argument type, or too many arguments
    :raises TypeError: invalid connection
    :raises pg.OperationalError: prepared statement does not exist

This method returns a :class:`Query` object describing the prepared
statement with the given name. You can also pass an empty name in order
to describe the unnamed statement. Information on the fields of the
corresponding query can be obtained through the :meth:`Query.listfields`,
:meth:`Query.fieldname` and :meth:`Query.fieldnum` methods.

.. versionadded:: 5.1

delete_prepared -- delete a prepared statement
----------------------------------------------

.. method:: DB.delete_prepared([name])

    Delete a prepared statement

    :param str name: name of the prepared statement
    :rtype: None
    :raises TypeError: bad argument type, or too many arguments
    :raises TypeError: invalid connection
    :raises pg.OperationalError: prepared statement does not exist

This method deallocates a previously prepared SQL statement with the given
name, or deallocates all prepared statements if you do not specify a name.
Note that prepared statements are always deallocated automatically when
the current session ends.

.. versionadded:: 5.1

clear -- clear row values in memory
-----------------------------------

.. method:: DB.clear(table, [row])

    Clear row values in memory

    :param str table: name of table
    :param dict row: optional dictionary of values
    :returns: an empty row
    :rtype: dict

This method clears all the attributes to values determined by the types.
Numeric types are set to 0, Booleans are set to *False*, and everything
else is set to the empty string. If the row argument is present, it is
used as the row dictionary and any entries matching attribute names are
cleared with everything else left unchanged. If the dictionary is not
supplied a new one is created.

delete -- delete a row from a database table
--------------------------------------------

.. method:: DB.delete(table, [row], [col=val, ...])

    Delete a row from a database table

    :param str table: name of table
    :param dict row: optional dictionary of values
    :param col: optional keyword arguments for updating the dictionary
    :rtype: int
    :raises pg.ProgrammingError: table has no primary key,
        row is still referenced or missing privilege
    :raises KeyError: missing key value for the row

This method deletes the row from a table. It deletes based on the primary
key of the table or the OID value as munged by :meth:`DB.get` or passed
as keyword. The OID will take precedence if provided.

The return value is the number of deleted rows (i.e. 0 if the row did not
exist and 1 if the row was deleted).

Note that if the row cannot be deleted because e.g. it is still referenced
by another table, this method will raise a ProgrammingError.
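Continuing the hypothetical ``employees`` example from above, a row could
be removed like this::

    emp = db.get('employees', 1234, 'ein')  # fetch the row first
    db.delete('employees', emp)             # then delete it
    # or delete directly by primary key value
    db.delete('employees', {'ein': 1234})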
truncate -- quickly empty database tables
-----------------------------------------

.. method:: DB.truncate(table, [restart], [cascade], [only])

    Empty a table or set of tables

    :param table: the name of the table(s)
    :type table: str, list or set
    :param bool restart: whether table sequences should be restarted
    :param bool cascade: whether referenced tables should also be truncated
    :param only: whether only parent tables should be truncated
    :type only: bool or list

This method quickly removes all rows from the given table or set of
tables. It has the same effect as an unqualified DELETE on each table,
but since it does not actually scan the tables it is faster. Furthermore,
it reclaims disk space immediately, rather than requiring a subsequent
VACUUM operation. This is most useful on large tables.

If *restart* is set to `True`, sequences owned by columns of the truncated
table(s) are automatically restarted. If *cascade* is set to `True`, it
also truncates all tables that have foreign-key references to any of the
named tables.

If the parameter *only* is not set to `True`, all the descendant tables
(if any) will also be truncated. Optionally, a ``*`` can be specified
after the table name to explicitly indicate that descendant tables are
included. If the parameter *table* is a list, the parameter *only* can
also be a list of corresponding boolean values.

.. versionadded:: 4.2

get_as_list/dict -- read a table as a list or dictionary
--------------------------------------------------------

.. method:: DB.get_as_list(table, [what], [where], [order], [limit], [offset], [scalar])

    Get a table as a list

    :param str table: the name of the table (the FROM clause)
    :param what: column(s) to be returned (the SELECT clause)
    :type what: str, list, tuple or None
    :param where: condition(s) to be fulfilled (the WHERE clause)
    :type where: str, list, tuple or None
    :param order: column(s) to sort by (the ORDER BY clause)
    :type order: str, list, tuple, False or None
    :param int limit: maximum number of rows returned (the LIMIT clause)
    :param int offset: number of rows to be skipped (the OFFSET clause)
    :param bool scalar: whether only the first column shall be returned
    :returns: the content of the table as a list
    :rtype: list
    :raises TypeError: the table name has not been specified

This gets a convenient representation of the table as a list of named
tuples in Python. You only need to pass the name of the table (or any
other SQL expression returning rows). Note that by default this will
return the full content of the table which can be huge and overflow your
memory. However, you can control the amount of data returned using the
other optional parameters.

The parameter *what* can restrict the query to only return a subset of the
table columns. The parameter *where* can restrict the query to only return
a subset of the table rows. The specified SQL expressions all need to be
fulfilled for a row to get into the result. The parameter *order*
specifies the ordering of the rows. If no ordering is specified, the
result will be ordered by the primary key(s) or all columns if no primary
key exists. You can set *order* to *False* if you don't care about the
ordering. The parameters *limit* and *offset* specify the maximum number
of rows returned and a number of rows skipped over.

If you set the *scalar* option to *True*, then instead of the named tuples
you will get the first items of these tuples. This is useful if the result
has only one column anyway.
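For instance, with the hypothetical ``employees`` table used before::

    rows = db.get_as_list('employees', what='name, phone',
        where="phone is not null", order='name', limit=10)
    names = db.get_as_list('employees', what='name', scalar=True)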
.. versionadded:: 5.0

.. method:: DB.get_as_dict(table, [keyname], [what], [where], [order], [limit], [offset], [scalar])

    Get a table as a dictionary

    :param str table: the name of the table (the FROM clause)
    :param keyname: column(s) to be used as key(s) of the dictionary
    :type keyname: str, list, tuple or None
    :param what: column(s) to be returned (the SELECT clause)
    :type what: str, list, tuple or None
    :param where: condition(s) to be fulfilled (the WHERE clause)
    :type where: str, list, tuple or None
    :param order: column(s) to sort by (the ORDER BY clause)
    :type order: str, list, tuple, False or None
    :param int limit: maximum number of rows returned (the LIMIT clause)
    :param int offset: number of rows to be skipped (the OFFSET clause)
    :param bool scalar: whether only the first column shall be returned
    :returns: the content of the table as a dictionary
    :rtype: dict
    :raises TypeError: the table name has not been specified
    :raises KeyError: keyname(s) are invalid or not part of the result
    :raises pg.ProgrammingError: no keyname(s) and table has no primary key

This method is similar to :meth:`DB.get_as_list`, but returns the table
as a Python dict instead of a Python list, which can be even more
convenient.

The primary key column(s) of the table will be used as the keys of the
dictionary, while the other column(s) will be the corresponding values.
The keys will be named tuples if the table has a composite primary key.
The rows will also be named tuples unless the *scalar* option has been
set to *True*. With the optional parameter *keyname* you can specify a
different set of columns to be used as the keys of the dictionary.

The dictionary will be ordered using the order specified with the *order*
parameter or the key column(s) if not specified. You can set *order* to
*False* if you don't care about the ordering.

.. versionadded:: 5.0

escape_literal/identifier/string/bytea -- escape for SQL
--------------------------------------------------------

The following methods escape text or binary strings so that they can be
inserted directly into an SQL command. Except for :meth:`DB.escape_bytea`,
you don't need to call these methods for the strings passed as parameters
to :meth:`DB.query`. You also don't need to call any of these methods
when storing data using :meth:`DB.insert` and similar.

.. method:: DB.escape_literal(string)

    Escape a string for use within SQL as a literal constant

    :param str string: the string that is to be escaped
    :returns: the escaped string
    :rtype: str

This method escapes a string for use within an SQL command. This is useful
when inserting data values as literal constants in SQL commands. Certain
characters (such as quotes and backslashes) must be escaped to prevent
them from being interpreted specially by the SQL parser.

.. versionadded:: 4.1

.. method:: DB.escape_identifier(string)

    Escape a string for use within SQL as an identifier

    :param str string: the string that is to be escaped
    :returns: the escaped string
    :rtype: str

This method escapes a string for use as an SQL identifier, such as a
table, column, or function name. This is useful when a user-supplied
identifier might contain special characters that would otherwise be
misinterpreted by the SQL parser, or when the identifier might contain
upper case characters whose case should be preserved.

.. versionadded:: 4.1
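A quick sketch of the kind of output these two methods might produce::

    db.escape_literal("O'Reilly")     # might return "'O''Reilly'"
    db.escape_identifier('My Table')  # might return '"My Table"'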
.. method:: DB.escape_string(string)

    Escape a string for use within SQL

    :param str string: the string that is to be escaped
    :returns: the escaped string
    :rtype: str

Similar to the module function :func:`pg.escape_string` with the same
name, but the behavior of this method is adjusted depending on the
connection properties (such as character encoding).

.. method:: DB.escape_bytea(datastring)

    Escape binary data for use within SQL as type ``bytea``

    :param bytes/str datastring: the binary data that is to be escaped
    :returns: the escaped string
    :rtype: bytes/str

Similar to the module function :func:`pg.escape_bytea` with the same name,
but the behavior of this method is adjusted depending on the connection
properties (in particular, whether standard-conforming strings are
enabled).

unescape_bytea -- unescape data retrieved from the database
-----------------------------------------------------------

.. method:: DB.unescape_bytea(string)

    Unescape ``bytea`` data that has been retrieved as text

    :param str string: the ``bytea`` string that has been retrieved as text
    :returns: byte string containing the binary data
    :rtype: bytes

Converts an escaped string representation of binary data stored as
``bytea`` into the raw byte string representing the binary data --
this is the reverse of :meth:`DB.escape_bytea`. Since the :class:`Query`
results will already return unescaped byte strings, you normally don't
have to use this method.

encode/decode_json -- encode and decode JSON data
-------------------------------------------------

The following methods can be used to encode and decode data in
`JSON <https://www.json.org/>`_ format.

.. method:: DB.encode_json(obj)

    Encode a Python object for use within SQL as type ``json`` or ``jsonb``

    :param obj: Python object that shall be encoded to JSON format
    :type obj: dict, list or None
    :returns: string representation of the Python object in JSON format
    :rtype: str

This method serializes a Python object into a JSON formatted string that
can be used within SQL. You don't need to use this method on the data
stored with :meth:`DB.insert` and similar, only if you store the data
directly as part of an SQL command or parameter with :meth:`DB.query`.
This is the same as the :func:`json.dumps` function from the standard
library.

.. versionadded:: 5.0

.. method:: DB.decode_json(string)

    Decode ``json`` or ``jsonb`` data that has been retrieved as text

    :param string: the JSON formatted string that shall be decoded
        into a Python object
    :type string: str
    :returns: Python object representing the JSON formatted string
    :rtype: dict, list or None

This method deserializes a JSON formatted string retrieved as text from
the database to a Python object. You normally don't need to use this
method as JSON data is automatically decoded by PyGreSQL. If you don't
want the data to be decoded, then you can cast ``json`` or ``jsonb``
columns to ``text`` in PostgreSQL or you can set the decoding function
to *None* or a different function using :func:`pg.set_jsondecode`.
By default this is the same as the :func:`json.loads` function from
the standard library.

.. versionadded:: 5.0
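For example, storing a JSON document in a hypothetical ``orders`` table
might look like this::

    info = {'customer': 'Joe', 'items': ['beer', 'chips']}
    db.query("update orders set info=$1 where id=$2",
        db.encode_json(info), 7)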
use_regtypes -- choose usage of registered type names
-----------------------------------------------------

.. method:: DB.use_regtypes([regtypes])

    Determine whether registered type names shall be used

    :param bool regtypes: if passed, set whether registered type names
        shall be used
    :returns: whether registered type names are used

The :meth:`DB.get_attnames` method can return either simplified "classic"
type names (the default) or more fine-grained "registered" type names.
Which kind of type names is used can be changed by calling
:meth:`DB.use_regtypes`. If you pass a boolean, it sets whether registered
type names shall be used. The method can also be used to check through its
return value whether registered type names are currently used.

.. versionadded:: 4.1

notification_handler -- create a notification handler
------------------------------------------------------

.. class:: DB.notification_handler(event, callback, [arg_dict], [timeout], [stop_event])

    Create a notification handler instance

    :param str event: the name of an event to listen for
    :param callback: a callback function
    :param dict arg_dict: an optional dictionary for passing arguments
    :param timeout: the time-out when waiting for notifications
    :type timeout: int, float or None
    :param str stop_event: an optional different name to be used as stop event

This method creates a :class:`pg.NotificationHandler` object using the
:class:`DB` connection as explained under :doc:`notification`.

.. versionadded:: 4.1.1

Attributes of the DB wrapper class
----------------------------------

.. attribute:: DB.db

    The wrapped :class:`Connection` object

You normally don't need this, since all of the members can be accessed
from the :class:`DB` wrapper class as well.

.. attribute:: DB.dbname

    The name of the database that the connection is using

.. attribute:: DB.dbtypes

    A dictionary with the various type names for the PostgreSQL types

This can be used for getting more information on the PostgreSQL database
types or changing the typecast functions used for the connection.
See the description of the :class:`DbTypes` class for details.

.. versionadded:: 5.0

.. attribute:: DB.adapter

    A class with some helper functions for adapting parameters

This can be used for building queries with parameters. You normally will
not need this, as you can use the :class:`DB.query_formatted` method.

.. versionadded:: 5.0

--------------------------------------------
:mod:`pg` --- The Classic PyGreSQL Interface
--------------------------------------------

.. module:: pg

Contents
========

.. toctree::

    introduction
    module
    connection
    db_wrapper
    query
    large_objects
    notification
    db_types
    adaptation

Introduction
============

.. currentmodule:: pg

You may either choose to use the "classic" PyGreSQL interface provided by
the :mod:`pg` module or else the newer DB-API 2.0 compliant interface
provided by the :mod:`pgdb` module.

The following part of the documentation covers only the older :mod:`pg` API.

The :mod:`pg` module handles three types of objects,

- the :class:`Connection` instances, which handle the connection
  and all the requests to the database,
- the :class:`LargeObject` instances, which handle
  all the accesses to PostgreSQL large objects,
- the :class:`Query` instances that handle query results

and it provides a convenient wrapper class :class:`DB`
for the basic :class:`Connection` class.
.. seealso::

    If you want to see a simple example of the use of some of these
    functions, see the :doc:`../examples` page.

LargeObject -- Large Objects
============================

.. currentmodule:: pg

.. class:: LargeObject

Instances of the class :class:`LargeObject` are used to handle all the
requests concerning a PostgreSQL large object. These objects embed and hide
all the recurring variables (object OID and connection), in the same way
:class:`Connection` instances do, thus only keeping significant parameters
in function calls. The :class:`LargeObject` instance keeps a reference to
the :class:`Connection` object used for its creation, sending requests
through with its parameters. Any modification other than dereferencing the
:class:`Connection` object will thus affect the :class:`LargeObject`
instance. Dereferencing the initial :class:`Connection` object is not a
problem since Python won't deallocate it before the :class:`LargeObject`
instance dereferences it. All functions return a generic error message on
error. The exact error message is provided by the object's :attr:`error`
attribute.

See also the PostgreSQL documentation for more information about the
`large object interface`__.

__ https://www.postgresql.org/docs/current/largeobjects.html

open -- open a large object
---------------------------

.. method:: LargeObject.open(mode)

    Open a large object

    :param int mode: open mode definition
    :rtype: None
    :raises TypeError: invalid connection, bad parameter type, or too many parameters
    :raises IOError: already opened object, or open error

This method opens a large object for reading/writing, in a similar manner
as the Unix open() function does for files. The mode value can be obtained
by OR-ing the constants defined in the :mod:`pg` module (:const:`INV_READ`,
:const:`INV_WRITE`).

close -- close a large object
-----------------------------

.. method:: LargeObject.close()

    Close a large object

    :rtype: None
    :raises TypeError: invalid connection
    :raises TypeError: too many parameters
    :raises IOError: object is not opened, or close error

This method closes a previously opened large object, in a similar manner
as the Unix close() function.

read, write, tell, seek, unlink -- file-like large object handling
-------------------------------------------------------------------

.. method:: LargeObject.read(size)

    Read data from large object

    :param int size: maximum size of the buffer to be read
    :returns: the read buffer
    :rtype: bytes
    :raises TypeError: invalid connection, invalid object,
        bad parameter type, or too many parameters
    :raises ValueError: if `size` is negative
    :raises IOError: object is not opened, or read error

This function allows reading data from a large object, starting at the
current position.

.. method:: LargeObject.write(data)

    Write data to large object

    :param bytes data: buffer of bytes to be written
    :rtype: None
    :raises TypeError: invalid connection, bad parameter type, or too many parameters
    :raises IOError: object is not opened, or write error

This function allows writing data to a large object, starting at the
current position.
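A short sketch of the write/read cycle (assuming a connection ``con``
as before)::

    lobj = con.locreate(pg.INV_READ | pg.INV_WRITE)
    lobj.open(pg.INV_WRITE)
    lobj.write(b'some binary data')
    lobj.close()
    lobj.open(pg.INV_READ)
    data = lobj.read(80)  # read the first 80 bytes
    lobj.close()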
.. method:: LargeObject.seek(offset, whence)

    Change current position in large object

    :param int offset: position offset
    :param int whence: positional parameter
    :returns: new position in object
    :rtype: int
    :raises TypeError: invalid connection or invalid object,
        bad parameter type, or too many parameters
    :raises IOError: object is not opened, or seek error

This method updates the position offset in the large object. The valid
values for the whence parameter are defined as constants in the :mod:`pg`
module (:const:`SEEK_SET`, :const:`SEEK_CUR`, :const:`SEEK_END`).

.. method:: LargeObject.tell()

    Return current position in large object

    :returns: current position in large object
    :rtype: int
    :raises TypeError: invalid connection or invalid object
    :raises TypeError: too many parameters
    :raises IOError: object is not opened, or seek error

This method returns the current position offset in the large object.

.. method:: LargeObject.unlink()

    Delete large object

    :rtype: None
    :raises TypeError: invalid connection or invalid object
    :raises TypeError: too many parameters
    :raises IOError: object is not closed, or unlink error

This method unlinks (deletes) the PostgreSQL large object.

size -- get the large object size
---------------------------------

.. method:: LargeObject.size()

    Return the large object size

    :returns: the large object size
    :rtype: int
    :raises TypeError: invalid connection or invalid object
    :raises TypeError: too many parameters
    :raises IOError: object is not opened, or seek/tell error

This (composite) method returns the size of a large object. It was
implemented because this function is very useful for a web interfaced
database. Currently, the large object needs to be opened first.

export -- save a large object to a file
---------------------------------------

.. method:: LargeObject.export(name)

    Export a large object to a file

    :param str name: file to be created
    :rtype: None
    :raises TypeError: invalid connection or invalid object,
        bad parameter type, or too many parameters
    :raises IOError: object is not closed, or export error

This method allows saving the content of a large object to a file in a
very simple way. The file is created on the host running the PyGreSQL
interface, not on the server host.

Object attributes
-----------------

:class:`LargeObject` objects define a read-only set of attributes exposing
some information about it. These attributes are:

.. attribute:: LargeObject.oid

    the OID associated with the large object (int)

.. attribute:: LargeObject.pgcnx

    the :class:`Connection` object associated with the large object

.. attribute:: LargeObject.error

    the last warning/error message of the connection (str)

.. warning::

    In multi-threaded environments, :attr:`LargeObject.error` may be
    modified by another thread using the same :class:`Connection`.
    Remember these objects are shared, not duplicated. You should provide
    some locking if you want to use this information in a program in which
    it's shared between multiple threads.

The :attr:`LargeObject.oid` attribute is very interesting, because it
allows you to reuse the OID later, creating the :class:`LargeObject`
object with a :meth:`Connection.getlo` method call.

Module functions and constants
==============================
.. currentmodule:: pg

The :mod:`pg` module defines a few functions that allow you to connect to a
database and to define "default variables" that override the environment
variables used by PostgreSQL.

These "default variables" were designed to allow you to handle general
connection parameters without heavy code in your programs. You can prompt
the user for a value, put it in the default variable, and forget it,
without having to modify your environment. All variables are set to
``None`` at module initialization, specifying that standard environment
variables should be used.

connect -- Open a PostgreSQL connection
---------------------------------------

.. function:: connect([dbname], [host], [port], [opt], [user], [passwd], [nowait])

    Open a :mod:`pg` connection

    :param dbname: name of connected database (*None* = :data:`defbase`)
    :type dbname: str or None
    :param host: name of the server host (*None* = :data:`defhost`)
    :type host: str or None
    :param port: port used by the database server (-1 = :data:`defport`)
    :type port: int
    :param opt: connection options (*None* = :data:`defopt`)
    :type opt: str or None
    :param user: PostgreSQL user (*None* = :data:`defuser`)
    :type user: str or None
    :param passwd: password for user (*None* = :data:`defpasswd`)
    :type passwd: str or None
    :param nowait: whether the connection should happen asynchronously
    :type nowait: bool
    :returns: If successful, the :class:`Connection` handling the connection
    :rtype: :class:`Connection`
    :raises TypeError: bad argument type, or too many arguments
    :raises SyntaxError: duplicate argument definition
    :raises pg.InternalError: some error occurred during pg connection definition
    :raises Exception: (all exceptions relative to object allocation)

This function opens a connection to a specified database on a given
PostgreSQL server. You can use keywords here, as described in the Python
tutorial. The names of the keywords are the names of the parameters given
in the syntax line. The ``opt`` parameter can be used to pass command-line
options to the server. For a precise description of the parameters, please
refer to the PostgreSQL user manual. See :meth:`Connection.poll` for a
description of the ``nowait`` parameter.

If you want to add additional parameters not specified here, you must pass
a connection string or a connection URI instead of the ``dbname`` (as in
``con3`` and ``con4`` in the following example).

.. versionchanged:: 5.2
    Support for asynchronous connections via the ``nowait`` parameter.

Example::

    import pg

    con1 = pg.connect('testdb', 'myhost', 5432, None, 'bob', None)
    con2 = pg.connect(dbname='testdb', host='myhost', user='bob')
    con3 = pg.connect('host=myhost user=bob dbname=testdb connect_timeout=10')
    con4 = pg.connect('postgresql://bob@myhost/testdb?connect_timeout=10')

get_pqlib_version -- get the version of libpq
---------------------------------------------

.. function:: get_pqlib_version()

    Get the version of libpq that is being used by PyGreSQL

    :returns: the version of libpq
    :rtype: int
    :raises TypeError: too many arguments

The number is formed by converting the major, minor, and revision numbers
of the libpq version into two-decimal-digit numbers and appending them
together. For example, version 15.4 will be returned as 150400.

.. versionadded:: 5.2

get/set_defhost -- default server host
--------------------------------------
.. function:: get_defhost()

    Get the default host

    :returns: the current default host specification
    :rtype: str or None
    :raises TypeError: too many arguments

This method returns the current default host specification, or ``None``
if the environment variables should be used. Environment variables won't
be looked up.

.. function:: set_defhost(host)

    Set the default host

    :param host: the new default host specification
    :type host: str or None
    :returns: the previous default host specification
    :rtype: str or None
    :raises TypeError: bad argument type, or too many arguments

This method sets the default host value for new connections. If ``None``
is supplied as parameter, environment variables will be used in future
connections. It returns the previous setting for default host.

get/set_defport -- default server port
--------------------------------------

.. function:: get_defport()

    Get the default port

    :returns: the current default port specification
    :rtype: int
    :raises TypeError: too many arguments

This method returns the current default port specification, or ``None``
if the environment variables should be used. Environment variables won't
be looked up.

.. function:: set_defport(port)

    Set the default port

    :param port: the new default port
    :type port: int
    :returns: previous default port specification
    :rtype: int or None

This method sets the default port value for new connections. If -1 is
supplied as parameter, environment variables will be used in future
connections. It returns the previous setting for default port.

get/set_defopt -- default connection options
---------------------------------------------

.. function:: get_defopt()

    Get the default connection options

    :returns: the current default options specification
    :rtype: str or None
    :raises TypeError: too many arguments

This method returns the current default connection options specification,
or ``None`` if the environment variables should be used. Environment
variables won't be looked up.

.. function:: set_defopt(options)

    Set the default connection options

    :param options: the new default connection options
    :type options: str or None
    :returns: previous default options specification
    :rtype: str or None
    :raises TypeError: bad argument type, or too many arguments

This method sets the default connection options value for new connections.
If ``None`` is supplied as parameter, environment variables will be used
in future connections. It returns the previous setting for default
options.

get/set_defbase -- default database name
----------------------------------------

.. function:: get_defbase()

    Get the default database name

    :returns: the current default database name specification
    :rtype: str or None
    :raises TypeError: too many arguments

This method returns the current default database name specification, or
``None`` if the environment variables should be used. Environment
variables won't be looked up.

.. function:: set_defbase(base)

    Set the default database name

    :param base: the new default base name
    :type base: str or None
    :returns: the previous default database name specification
    :rtype: str or None
    :raises TypeError: bad argument type, or too many arguments

This method sets the default database name value for new connections.
If ``None`` is supplied as parameter, environment variables will be used
in future connections. It returns the previous setting for the default
database name.
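For instance, a sketch of how these default variables might be used
(the host and database names are hypothetical)::

    import pg

    pg.set_defhost('dbserver')
    pg.set_defbase('testdb')
    con = pg.connect()  # connects to testdb on dbserver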
get/set_defuser -- default database user
----------------------------------------

.. function:: get_defuser()

    Get the default database user

    :returns: the current default database user specification
    :rtype: str or None
    :raises TypeError: too many arguments

This method returns the current default database user specification, or
``None`` if the environment variables should be used. Environment
variables won't be looked up.

.. function:: set_defuser(user)

    Set the default database user

    :param user: the new default database user
    :type user: str or None
    :returns: the previous default database user specification
    :rtype: str or None
    :raises TypeError: bad argument type, or too many arguments

This method sets the default database user name for new connections.
If ``None`` is supplied as parameter, environment variables will be used
in future connections. It returns the previous setting for the default
user.

get/set_defpasswd -- default database password
----------------------------------------------

.. function:: get_defpasswd()

    Get the default database password

    :returns: the current default database password specification
    :rtype: str or None
    :raises TypeError: too many arguments

This method returns the current default database password specification,
or ``None`` if the environment variables should be used. Environment
variables won't be looked up.

.. function:: set_defpasswd(passwd)

    Set the default database password

    :param passwd: the new default database password
    :type passwd: str or None
    :returns: the previous default database password specification
    :rtype: str or None
    :raises TypeError: bad argument type, or too many arguments

This method sets the default database password for new connections.
If ``None`` is supplied as parameter, environment variables will be used
in future connections. It returns the previous setting for the default
password.

escape_string -- escape a string for use within SQL
---------------------------------------------------

.. function:: escape_string(string)

    Escape a string for use within SQL

    :param str string: the string that is to be escaped
    :returns: the escaped string
    :rtype: str
    :raises TypeError: bad argument type, or too many arguments

This function escapes a string for use within an SQL command. This is
useful when inserting data values as literal constants in SQL commands.
Certain characters (such as quotes and backslashes) must be escaped to
prevent them from being interpreted specially by the SQL parser.
:func:`escape_string` performs this operation. Note that there is also
a :class:`Connection` method with the same name which takes connection
properties into account.

.. note::

    It is especially important to do proper escaping when handling strings
    that were received from an untrustworthy source. Otherwise there is a
    security risk: you are vulnerable to "SQL injection" attacks wherein
    unwanted SQL commands are fed to your database.

Example::

    name = input("Name? ")
    phone = con.query("select phone from employees"
        f" where name='{escape_string(name)}'").singlescalar()

escape_bytea -- escape binary data for use within SQL
-----------------------------------------------------

.. function:: escape_bytea(datastring)

    Escape binary data for use within SQL as type ``bytea``

    :param bytes/str datastring: the binary data that is to be escaped
    :returns: the escaped string
    :rtype: bytes/str
    :raises TypeError: bad argument type, or too many arguments

Escapes binary data for use within an SQL command with the type ``bytea``.
The return value will have the same type as the given *datastring*.
escape_bytea -- escape binary data for use within SQL
-----------------------------------------------------

.. function:: escape_bytea(datastring)

    Escape binary data for use within SQL as type ``bytea``

    :param bytes/str datastring: the binary data that is to be escaped
    :returns: the escaped string
    :rtype: bytes/str
    :raises TypeError: bad argument type, or too many arguments

Escapes binary data for use within an SQL command with the type ``bytea``.
The return value will have the same type as the given *datastring*.
As with :func:`escape_string`, this is only used when inserting data
directly into an SQL command string.
Note that there is also a :class:`Connection` method with the same name
which takes connection properties into account.

Example::

    picture = open('garfield.gif', 'rb').read()
    con.query(f"update pictures set img='{escape_bytea(picture)}'"
              " where name='Garfield'")

unescape_bytea -- unescape data that has been retrieved as text
---------------------------------------------------------------

.. function:: unescape_bytea(string)

    Unescape ``bytea`` data that has been retrieved as text

    :param str string: the ``bytea`` string that has been retrieved as text
    :returns: byte string containing the binary data
    :rtype: bytes
    :raises TypeError: bad argument type, or too many arguments

Converts an escaped string representation of binary data stored as ``bytea``
into the raw byte string representing the binary data -- this is the reverse
of :func:`escape_bytea`. Since the :class:`Query` results will already
return unescaped byte strings, you normally don't have to use this method.

Note that there is also a :class:`DB` method with the same name
which does exactly the same.

get/set_decimal -- decimal type to be used for numeric values
-------------------------------------------------------------

.. function:: get_decimal()

    Get the decimal type to be used for numeric values

    :returns: the Python class used for PostgreSQL numeric values
    :rtype: class

This function returns the Python class that is used by PyGreSQL to hold
PostgreSQL numeric values. The default class is :class:`decimal.Decimal`.

.. function:: set_decimal(cls)

    Set a decimal type to be used for numeric values

    :param class cls: the Python class to be used for PostgreSQL numeric values

This function can be used to specify the Python class that shall be
used by PyGreSQL to hold PostgreSQL numeric values. The default class
is :class:`decimal.Decimal`.
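For instance, here is a small sketch of switching numeric values to plain
Python floats instead of :class:`decimal.Decimal`, trading exactness for
speed (the database name ``'test'`` is just an assumption for this
example)::

    >>> from decimal import Decimal
    >>> import pg
    >>> pg.set_decimal(float)
    >>> con = pg.connect('test')  # assuming a database named 'test'
    >>> con.query("select 3.75::numeric").singlescalar()
    3.75
    >>> pg.set_decimal(Decimal)  # restore the default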
get/set_decimal_point -- decimal mark used for monetary values
--------------------------------------------------------------

.. function:: get_decimal_point()

    Get the decimal mark used for monetary values

    :returns: string with one character representing the decimal mark
    :rtype: str

This function returns the decimal mark used by PyGreSQL to interpret
PostgreSQL monetary values when converting them to decimal numbers.
The default setting is ``'.'`` as a decimal point. This setting is not
adapted automatically to the locale used by PostgreSQL, but you can use
:func:`set_decimal_point` to set a different decimal mark manually.
A return value of ``None`` means monetary values are not interpreted as
decimal numbers, but returned as strings including the formatting and
currency.

.. versionadded:: 4.1.1

.. function:: set_decimal_point(string)

    Specify which decimal mark is used for interpreting monetary values

    :param str string: string with one character representing the decimal mark

This function can be used to specify the decimal mark used by PyGreSQL to
interpret PostgreSQL monetary values. The default value is ``'.'`` as a
decimal point. This value is not adapted automatically to the locale used
by PostgreSQL, so if you are dealing with a database set to a locale that
uses a ``','`` instead of ``'.'`` as the decimal point, then you need to
call ``set_decimal_point(',')`` to have PyGreSQL interpret monetary values
correctly.
If you don't want money values to be converted to decimal numbers, then you
can call ``set_decimal_point(None)``, which will cause PyGreSQL to return
monetary values as strings including their formatting and currency.

.. versionadded:: 4.1.1

get/set_bool -- whether boolean values are returned as bool objects
-------------------------------------------------------------------

.. function:: get_bool()

    Check whether boolean values are returned as bool objects

    :returns: whether or not bool objects will be returned
    :rtype: bool

This function checks whether PyGreSQL returns PostgreSQL boolean values
converted to Python bool objects, or as ``'f'`` and ``'t'`` strings which
are the values used internally by PostgreSQL. By default, conversion to
bool objects is activated, but you can disable this with the
:func:`set_bool` function.

.. versionadded:: 4.2

.. function:: set_bool(on)

    Set whether boolean values are returned as bool objects

    :param on: whether or not bool objects shall be returned

This function can be used to specify whether PyGreSQL shall return
PostgreSQL boolean values converted to Python bool objects, or as ``'f'``
and ``'t'`` strings which are the values used internally by PostgreSQL.
By default, conversion to bool objects is activated, but you can disable
this by calling ``set_bool(False)``.

.. versionadded:: 4.2

.. versionchanged:: 5.0
    Boolean values had been returned as strings by default
    in earlier versions.

get/set_array -- whether arrays are returned as list objects
------------------------------------------------------------

.. function:: get_array()

    Check whether arrays are returned as list objects

    :returns: whether or not list objects will be returned
    :rtype: bool

This function checks whether PyGreSQL returns PostgreSQL arrays converted
to Python list objects, or simply as text in the internal special output
syntax of PostgreSQL. By default, conversion to list objects is activated,
but you can disable this with the :func:`set_array` function.

.. versionadded:: 5.0

.. function:: set_array(on)

    Set whether arrays are returned as list objects

    :param on: whether or not list objects shall be returned

This function can be used to specify whether PyGreSQL shall return
PostgreSQL arrays converted to Python list objects, or simply as text in
the internal special output syntax of PostgreSQL. By default, conversion
to list objects is activated, but you can disable this by calling
``set_array(False)``.

.. versionadded:: 5.0

.. versionchanged:: 5.0
    Arrays had always been returned as text strings in earlier versions.
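A quick sketch of what toggling this conversion means in practice,
assuming an open classic connection ``con`` (whether a global change is
picked up immediately by an already open connection is an assumption
here; see the remarks on typecast caching further below)::

    >>> import pg
    >>> con.query("select array[1, 2, 3]").singlescalar()
    [1, 2, 3]
    >>> pg.set_array(False)
    >>> con.query("select array[1, 2, 3]").singlescalar()
    '{1,2,3}'
    >>> pg.set_array(True)  # restore the default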
get/set_bytea_escaped -- whether bytea data is returned escaped
---------------------------------------------------------------

.. function:: get_bytea_escaped()

    Check whether bytea values are returned as escaped strings

    :returns: whether or not bytea objects will be returned escaped
    :rtype: bool

This function checks whether PyGreSQL returns PostgreSQL ``bytea`` values
in escaped form or in unescaped form as byte strings. By default, bytea
values will be returned unescaped as byte strings, but you can change this
with the :func:`set_bytea_escaped` function.

.. versionadded:: 5.0

.. function:: set_bytea_escaped(on)

    Set whether bytea values are returned as escaped strings

    :param on: whether or not bytea objects shall be returned escaped

This function can be used to specify whether PyGreSQL shall return
PostgreSQL ``bytea`` values in escaped form or in unescaped form as byte
strings. By default, bytea values will be returned unescaped as byte
strings, but you can change this by calling ``set_bytea_escaped(True)``.

.. versionadded:: 5.0

.. versionchanged:: 5.0
    Bytea data had been returned in escaped form by default
    in earlier versions.

get/set_jsondecode -- decoding JSON format
------------------------------------------

.. function:: get_jsondecode()

    Get the function that deserializes JSON formatted strings

This returns the function used by PyGreSQL to construct Python objects
from JSON formatted strings.

.. function:: set_jsondecode(func)

    Set a function that will deserialize JSON formatted strings

    :param func: the function to be used for deserializing JSON strings

You can use this if you do not want to deserialize JSON strings coming in
from the database, or if you want to use a different function than the
standard function :func:`json.loads`, or if you want to use it with
parameters different from the default ones. If you set this function to
*None*, then the automatic deserialization of JSON strings will be
deactivated.

.. versionadded:: 5.0

.. versionchanged:: 5.0
    JSON data had always been returned as text strings in earlier versions.
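As an illustrative sketch, you could have all JSON numbers with a decimal
point come back as :class:`decimal.Decimal` values by installing a
customized decoder (``parse_float`` is a standard keyword argument of
:func:`json.loads`)::

    from decimal import Decimal
    from functools import partial
    from json import loads

    import pg

    # use json.loads, but parse floating point numbers exactly
    pg.set_jsondecode(partial(loads, parse_float=Decimal))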
get/set_datestyle -- assume a fixed date style
----------------------------------------------

.. function:: get_datestyle()

    Get the assumed date style for typecasting

This returns the PostgreSQL date style that is silently assumed when
typecasting dates or *None* if no fixed date style is assumed, in which
case the date style is requested from the database when necessary (this
is the default). Note that this method will *not* get the date style that
is currently set in the session or in the database. You can get the
current setting with the methods :meth:`DB.get_parameter` and
:meth:`Connection.parameter`. You can also get the date format
corresponding to the current date style by calling
:meth:`Connection.date_format`.

.. versionadded:: 5.0

.. function:: set_datestyle(datestyle)

    Set a fixed date style that shall be assumed when typecasting

    :param str datestyle: the date style that shall be assumed,
        or *None* if no fixed date style shall be assumed

PyGreSQL is able to automatically pick up the right date style for
typecasting date values from the database, even if you change it for the
current session with a ``SET DateStyle`` command. This happens very
efficiently, without an additional database request being involved. If you
still want to have PyGreSQL always assume a fixed date style instead, then
you can set one with this function. Note that calling this function will
*not* alter the date style of the database or the current session. You can
do that by calling the method :meth:`DB.set_parameter` instead.

.. versionadded:: 5.0

get/set_typecast -- custom typecasting
--------------------------------------

PyGreSQL uses typecast functions to cast the raw data coming from the
database to Python objects suitable for the particular database type.
These functions take a single string argument that represents the data
to be cast and must return the cast value.

PyGreSQL provides through its C extension module basic typecast functions
for the common database types, but if you want to add more typecast
functions, you can set these using the following functions.

.. function:: get_typecast(typ)

    Get the global cast function for the given database type

    :param str typ: PostgreSQL type name
    :returns: the typecast function for the specified type
    :rtype: function or None

.. versionadded:: 5.0

.. function:: set_typecast(typ, cast)

    Set a global typecast function for the given database type(s)

    :param typ: PostgreSQL type name or list of type names
    :type typ: str or list
    :param cast: the typecast function to be set for the specified type(s)
    :type cast: callable or None

The typecast function must take one string object as argument and return a
Python object into which the PostgreSQL type shall be cast. If the function
takes another parameter named *connection*, then the current database
connection will also be passed to the typecast function. This may sometimes
be necessary to look up certain database settings.

.. versionadded:: 5.0

Note that database connections cache types and their cast functions using
connection specific :class:`DbTypes` objects. You can also get, set and
reset typecast functions on the connection level using the methods
:meth:`DbTypes.get_typecast`, :meth:`DbTypes.set_typecast` and
:meth:`DbTypes.reset_typecast` of the :attr:`DB.dbtypes` object. This will
not affect other connections or future connections. In order to be sure
a global change is picked up by a running connection, you must reopen it or
call :meth:`DbTypes.reset_typecast` on the :attr:`DB.dbtypes` object.

Also note that the typecasting for all of the basic types happens already
in the C extension module. The typecast functions that can be set with
the above methods are only called for the types that are not already
supported by the C extension module.
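For example, here is a sketch of a custom typecast function that converts
values of the PostgreSQL ``inet`` type into objects of the standard library
module :mod:`ipaddress` (by default, such values would come back as plain
strings)::

    from ipaddress import ip_interface

    import pg

    def cast_inet(value):
        """Cast a textual inet value to an ip_interface object."""
        return ip_interface(value)

    pg.set_typecast('inet', cast_inet)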
cast_array/record -- fast parsers for arrays and records
--------------------------------------------------------

PostgreSQL returns arrays and records (composite types) using a special
output syntax with several quirks that cannot easily and quickly be parsed
in Python. Therefore the C extension module provides two fast parsers that
allow quickly turning these text representations into Python objects:
Arrays will be converted to Python lists, and records to Python tuples.
These fast parsers are used automatically by PyGreSQL in order to return
arrays and records from database queries as lists and tuples, so you
normally don't need to call them directly. You may only need them for
typecasting arrays of data types that are not supported by default in
PostgreSQL.

.. function:: cast_array(string, [cast], [delim])

    Cast a string representing a PostgreSQL array to a Python list

    :param str string: the string with the text representation of the array
    :param cast: a typecast function for the elements of the array
    :type cast: callable or None
    :param delim: delimiter character between adjacent elements
    :type delim: byte string with a single character
    :returns: a list representing the PostgreSQL array in Python
    :rtype: list
    :raises TypeError: invalid argument types
    :raises ValueError: error in the syntax of the given array

This function takes a *string* containing the text representation of a
PostgreSQL array (which may look like ``'{{1,2},{3,4}}'`` for a
two-dimensional array), a typecast function *cast* that is called for
every element, and an optional delimiter character *delim* (usually a
comma), and returns a Python list representing the array (which may be
nested like ``[[1, 2], [3, 4]]`` in this example). The cast function must
take a single argument which will be the text representation of the
element and must output the corresponding Python object that shall be put
into the list. If you don't pass a cast function or set it to *None*, then
unprocessed text strings will be returned as elements of the array. If you
don't pass a delimiter character, then a comma will be used by default.

.. versionadded:: 5.0

.. function:: cast_record(string, [cast], [delim])

    Cast a string representing a PostgreSQL record to a Python tuple

    :param str string: the string with the text representation of the record
    :param cast: typecast function(s) for the elements of the record
    :type cast: callable, list or tuple of callables, or None
    :param delim: delimiter character between adjacent elements
    :type delim: byte string with a single character
    :returns: a tuple representing the PostgreSQL record in Python
    :rtype: tuple
    :raises TypeError: invalid argument types
    :raises ValueError: error in the syntax of the given record

This function takes a *string* containing the text representation of a
PostgreSQL record (which may look like ``'(1,a,2,b)'`` for a record
composed of four fields), a typecast function *cast* that is called for
every element, or a list or tuple of such functions corresponding to the
individual fields of the record, and an optional delimiter character
*delim* (usually a comma), and returns a Python tuple representing the
record (which may be inhomogeneous like ``(1, 'a', 2, 'b')`` in this
example). The cast function(s) must take a single argument which will be
the text representation of the element and must output the corresponding
Python object that shall be put into the tuple. If you don't pass cast
function(s) or pass *None* instead, then unprocessed text strings will be
returned as elements of the tuple. If you don't pass a delimiter character,
then a comma will be used by default.

.. versionadded:: 5.0

Note that besides using parentheses instead of braces, there are other
subtle differences in escaping special characters and NULL values between
the syntax used for arrays and the one used for composite types, which
these functions take into account.
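A quick sketch of both parsers in action, matching the examples given
above::

    >>> from pg import cast_array, cast_record
    >>> cast_array('{{1,2},{3,4}}', int)
    [[1, 2], [3, 4]]
    >>> cast_record('(1,a,2,b)', (int, str, int, str))
    (1, 'a', 2, 'b')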
Type helpers
------------

The module provides the following type helper functions. You can wrap
parameters with these functions when passing them to :meth:`DB.query`
or :meth:`DB.query_formatted` in order to give PyGreSQL a hint about the
type of the parameters, if it cannot be derived from the context.

.. function:: Bytea(bytes)

    A wrapper for holding a bytea value

.. versionadded:: 5.0

.. function:: HStore(dict)

    A wrapper for holding an hstore dictionary

.. versionadded:: 5.0

.. function:: Json(obj)

    A wrapper for holding an object serializable to JSON

.. versionadded:: 5.0

The following additional type helper is only meaningful when used with
:meth:`DB.query_formatted`. It marks a parameter as text that shall be
literally included into the SQL. This is useful for passing table names
for instance.

.. function:: Literal(sql)

    A wrapper for holding a literal SQL string

.. versionadded:: 5.0

Module constants
----------------

Some constants are defined in the module dictionary. They are intended to
be used as parameters for method calls. You should refer to the libpq
description in the PostgreSQL user manual for more information about them.
These constants are:

.. data:: version
.. data:: __version__

    constants that give the current version

.. data:: INV_READ
.. data:: INV_WRITE

    large objects access modes,
    used by :meth:`Connection.locreate` and :meth:`LargeObject.open`

.. data:: POLLING_OK
.. data:: POLLING_FAILED
.. data:: POLLING_READING
.. data:: POLLING_WRITING

    polling states, returned by :meth:`Connection.poll`

.. data:: SEEK_SET
.. data:: SEEK_CUR
.. data:: SEEK_END

    positional flags, used by :meth:`LargeObject.seek`

.. data:: TRANS_IDLE
.. data:: TRANS_ACTIVE
.. data:: TRANS_INTRANS
.. data:: TRANS_INERROR
.. data:: TRANS_UNKNOWN

    transaction states, used by :meth:`Connection.transaction`

PyGreSQL-PyGreSQL-166b135/docs/contents/pg/notification.rst000066400000000000000000000106751450706350600234270ustar00rootroot00000000000000The Notification Handler
========================

.. currentmodule:: pg

PyGreSQL comes with a client-side asynchronous notification handler that
is based on the ``pgnotify`` module written by Ng Pheng Siong.

.. versionadded:: 4.1.1

Instantiating the notification handler
--------------------------------------

.. class:: NotificationHandler(db, event, callback, [arg_dict], [timeout], [stop_event])

    Create an instance of the notification handler

    :param db: the database connection
    :type db: :class:`Connection`
    :param str event: the name of an event to listen for
    :param callback: a callback function
    :param dict arg_dict: an optional dictionary for passing arguments
    :param timeout: the time-out when waiting for notifications
    :type timeout: int, float or None
    :param str stop_event: an optional different name to be used as stop event

You can also create an instance of the NotificationHandler using the
:meth:`DB.notification_handler` method. In this case you don't need to pass
a database connection, because the :class:`DB` connection itself will be
used as the database connection for the notification handler.

You must always pass the name of an *event* (notification channel) to
listen for and a *callback* function.

You can also specify a dictionary *arg_dict* that will be passed as the
single argument to the callback function, and a *timeout* value in seconds
(a floating point number denotes fractions of seconds). If it is absent or
*None*, the handler will never time out. If the time-out is reached, the
callback function will be called with a single argument that is *None*.
If you set the *timeout* to ``0``, the handler will poll notifications
synchronously and return.

You can specify the name of the event that will be used to signal the
handler to stop listening as *stop_event*. By default, it will be the
event name prefixed with ``'stop_'``.

All of the parameters will be also available as attributes of the
created notification handler object.

Invoking the notification handler
---------------------------------

To invoke the notification handler, just call the instance without
passing any parameters.

The handler is a loop that listens for notifications on the event and
stop event channels. When either of these notifications is received, its
associated *pid*, *event* and *extra* (the payload passed with the
notification) are inserted into its *arg_dict* dictionary and the callback
is invoked with this dictionary as a single argument. When the handler
receives a stop event, it stops listening to both events and returns.

In the special case that the timeout of the handler has been set to ``0``,
the handler will poll all events synchronously and return. Otherwise, it
will keep listening until it receives a stop event.

.. warning::

    If you run this loop in another thread, don't use the same database
    connection for database operations in the main thread.
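A compact sketch of the overall flow (the channel name ``'news'`` and the
database name ``'test'`` are made-up placeholders)::

    import pg

    db = pg.DB('test')  # assuming a database named 'test'

    def callback(arg_dict):
        if arg_dict is None:
            print('timed out')
        else:
            print('got notification:', arg_dict)

    handler = db.notification_handler('news', callback, timeout=30)
    handler()  # blocks and dispatches until a 'stop_news' event arrives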
Sending notifications
---------------------

You can send notifications by either running ``NOTIFY`` commands on the
database directly, or using the following method:

.. method:: NotificationHandler.notify([db], [stop], [payload])

    Generate a notification

    :param db: the database connection for sending the notification
    :type db: :class:`Connection`
    :param bool stop: whether to produce a normal event or a stop event
    :param str payload: an optional payload to be sent with the notification

This method sends a notification event together with an optional *payload*.
If you set the *stop* flag, a stop notification will be sent instead of a
normal notification. This will cause the handler to stop listening.

.. warning::

    If the notification handler is running in another thread, you must pass
    a different database connection since PyGreSQL database connections are
    not thread-safe.

Auxiliary methods
-----------------

.. method:: NotificationHandler.listen()

    Start listening for the event and the stop event

This method is called implicitly when the handler is invoked.

.. method:: NotificationHandler.unlisten()

    Stop listening for the event and the stop event

This method is called implicitly when the handler receives a stop event
or when it is closed or deleted.

.. method:: NotificationHandler.close()

    Stop listening and close the database connection

You can call this method instead of :meth:`NotificationHandler.unlisten`
if you want to close not only the handler, but also the database connection
it was created with.

PyGreSQL-PyGreSQL-166b135/docs/contents/pg/query.rst000066400000000000000000000346531450706350600221060ustar00rootroot00000000000000Query methods
=============

.. currentmodule:: pg

.. class:: Query

The :class:`Query` object returned by :meth:`Connection.query` and
:meth:`DB.query` can be used as an iterable returning rows as tuples.
You can also directly access row tuples using their index, and get
the number of rows with the :func:`len` function.
The :class:`Query` class also provides the following methods for
accessing the results of the query:

getresult -- get query values as list of tuples
-----------------------------------------------

.. method:: Query.getresult()

    Get query values as list of tuples

    :returns: result values as a list of tuples
    :rtype: list
    :raises TypeError: too many (any) parameters
    :raises MemoryError: internal memory error

This method returns query results as a list of tuples.
More information about this result may be accessed using
:meth:`Query.listfields`, :meth:`Query.fieldname`
and :meth:`Query.fieldnum` methods.

Note that since PyGreSQL 5.0 this method will return the values of array
type columns as Python lists.

Since PyGreSQL 5.1 the :class:`Query` can also be used directly as an
iterable sequence, i.e. you can iterate over the :class:`Query` object
to get the same tuples as returned by :meth:`Query.getresult`. This is
slightly more efficient than getting the full list of results, but note
that the full result is always fetched from the server anyway when the
query is executed.

You can also call :func:`len` on a query to find the number of rows
in the result, and access row tuples using their index directly on
the :class:`Query` object.

When the :class:`Query` object was returned by :meth:`Connection.send_query`,
other return values are also possible, as documented there.

dictresult/dictiter -- get query values as dictionaries
-------------------------------------------------------
.. method:: Query.dictresult()

    Get query values as list of dictionaries

    :returns: result values as a list of dictionaries
    :rtype: list
    :raises TypeError: too many (any) parameters
    :raises MemoryError: internal memory error

This method returns query results as a list of dictionaries which have the
field names as keys. If the query has duplicate field names, you will get
the value for the field with the highest index in the query.

Note that since PyGreSQL 5.0 this method will return the values of array
type columns as Python lists.

.. method:: Query.dictiter()

    Get query values as iterable of dictionaries

    :returns: result values as an iterable of dictionaries
    :rtype: iterable
    :raises TypeError: too many (any) parameters
    :raises MemoryError: internal memory error

This method returns query results as an iterable of dictionaries which
have the field names as keys. This is slightly more efficient than getting
the full list of results as dictionaries, but note that the full result is
always fetched from the server anyway when the query is executed.

If the query has duplicate field names, you will get the value for the
field with the highest index in the query.

When the :class:`Query` object was returned by :meth:`Connection.send_query`,
other return values are also possible, as documented there.

.. versionadded:: 5.1

namedresult/namediter -- get query values as named tuples
---------------------------------------------------------

.. method:: Query.namedresult()

    Get query values as list of named tuples

    :returns: result values as a list of named tuples
    :rtype: list
    :raises TypeError: too many (any) parameters
    :raises TypeError: named tuples not supported
    :raises MemoryError: internal memory error

This method returns query results as a list of named tuples with
proper field names.

Column names in the database that are not valid as field names for
named tuples (particularly, names starting with an underscore) are
automatically renamed to valid positional names.

Note that since PyGreSQL 5.0 this method will return the values of array
type columns as Python lists.

.. versionadded:: 4.1

.. method:: Query.namediter()

    Get query values as iterable of named tuples

    :returns: result values as an iterable of named tuples
    :rtype: iterable
    :raises TypeError: too many (any) parameters
    :raises TypeError: named tuples not supported
    :raises MemoryError: internal memory error

This method returns query results as an iterable of named tuples with
proper field names. This is slightly more efficient than getting the full
list of results as named tuples, but note that the full result is always
fetched from the server anyway when the query is executed.

Column names in the database that are not valid as field names for
named tuples (particularly, names starting with an underscore) are
automatically renamed to valid positional names.

When the :class:`Query` object was returned by :meth:`Connection.send_query`,
other return values are also possible, as documented there.

.. versionadded:: 5.1

scalarresult/scalariter -- get query values as scalars
------------------------------------------------------

.. method:: Query.scalarresult()

    Get first fields from query result as list of scalar values

    :returns: first fields from result as a list of scalar values
    :rtype: list
    :raises TypeError: too many (any) parameters
    :raises MemoryError: internal memory error

This method returns the first fields from the query results as a list of
scalar values in the order returned by the server.

.. versionadded:: 5.1
.. method:: Query.scalariter()

    Get first fields from query result as iterable of scalar values

    :returns: first fields from result as an iterable of scalar values
    :rtype: iterable
    :raises TypeError: too many (any) parameters
    :raises MemoryError: internal memory error

This method returns the first fields from the query results as an iterable
of scalar values in the order returned by the server. This is slightly more
efficient than getting the full list of results as rows or scalar values,
but note that the full result is always fetched from the server anyway
when the query is executed.

.. versionadded:: 5.1

one/onedict/onenamed/onescalar -- get one result of a query
-----------------------------------------------------------

.. method:: Query.one()

    Get one row from the result of a query as a tuple

    :returns: next row from the query results as a tuple of fields
    :rtype: tuple or None
    :raises TypeError: too many (any) parameters
    :raises MemoryError: internal memory error

Returns only one row from the result as a tuple of fields.

This method can be called multiple times to return more rows.
It returns ``None`` if the result does not contain any more rows.

.. versionadded:: 5.1

.. method:: Query.onedict()

    Get one row from the result of a query as a dictionary

    :returns: next row from the query results as a dictionary
    :rtype: dict or None
    :raises TypeError: too many (any) parameters
    :raises MemoryError: internal memory error

Returns only one row from the result as a dictionary with the field names
used as the keys.

This method can be called multiple times to return more rows.
It returns ``None`` if the result does not contain any more rows.

.. versionadded:: 5.1

.. method:: Query.onenamed()

    Get one row from the result of a query as named tuple

    :returns: next row from the query results as a named tuple
    :rtype: namedtuple or None
    :raises TypeError: too many (any) parameters
    :raises MemoryError: internal memory error

Returns only one row from the result as a named tuple with proper
field names.

Column names in the database that are not valid as field names for
named tuples (particularly, names starting with an underscore) are
automatically renamed to valid positional names.

This method can be called multiple times to return more rows.
It returns ``None`` if the result does not contain any more rows.

.. versionadded:: 5.1

.. method:: Query.onescalar()

    Get one row from the result of a query as scalar value

    :returns: next row from the query results as a scalar value
    :rtype: type of first field or None
    :raises TypeError: too many (any) parameters
    :raises MemoryError: internal memory error

Returns the first field of the next row from the result as a scalar value.

This method can be called multiple times to return more rows as scalars.
It returns ``None`` if the result does not contain any more rows.

.. versionadded:: 5.1
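A small sketch of consuming a result row by row with these methods,
assuming an open classic connection ``con`` and an ``employees`` table
as used in the earlier examples::

    q = con.query("select name, phone from employees")
    while True:
        row = q.onenamed()
        if row is None:  # no more rows in the result
            break
        print(row.name, row.phone)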
single/singledict/singlenamed/singlescalar -- get single result of a query
--------------------------------------------------------------------------

.. method:: Query.single()

    Get single row from the result of a query as a tuple

    :returns: single row from the query results as a tuple of fields
    :rtype: tuple
    :raises pg.InvalidResultError: result does not have exactly one row
    :raises TypeError: too many (any) parameters
    :raises MemoryError: internal memory error

Returns a single row from the result as a tuple of fields.

This method returns the same single row when called multiple times.
It raises an :exc:`pg.InvalidResultError` if the result does not have
exactly one row. More specifically, this will be of type
:exc:`pg.NoResultError` if it is empty and of type
:exc:`pg.MultipleResultsError` if it has multiple rows.

.. versionadded:: 5.1

.. method:: Query.singledict()

    Get single row from the result of a query as a dictionary

    :returns: single row from the query results as a dictionary
    :rtype: dict
    :raises pg.InvalidResultError: result does not have exactly one row
    :raises TypeError: too many (any) parameters
    :raises MemoryError: internal memory error

Returns a single row from the result as a dictionary with the field names
used as the keys.

This method returns the same single row when called multiple times.
It raises an :exc:`pg.InvalidResultError` if the result does not have
exactly one row. More specifically, this will be of type
:exc:`pg.NoResultError` if it is empty and of type
:exc:`pg.MultipleResultsError` if it has multiple rows.

.. versionadded:: 5.1

.. method:: Query.singlenamed()

    Get single row from the result of a query as named tuple

    :returns: single row from the query results as a named tuple
    :rtype: namedtuple
    :raises pg.InvalidResultError: result does not have exactly one row
    :raises TypeError: too many (any) parameters
    :raises MemoryError: internal memory error

Returns a single row from the result as a named tuple with proper
field names.

Column names in the database that are not valid as field names for
named tuples (particularly, names starting with an underscore) are
automatically renamed to valid positional names.

This method returns the same single row when called multiple times.
It raises an :exc:`pg.InvalidResultError` if the result does not have
exactly one row. More specifically, this will be of type
:exc:`pg.NoResultError` if it is empty and of type
:exc:`pg.MultipleResultsError` if it has multiple rows.

.. versionadded:: 5.1

.. method:: Query.singlescalar()

    Get single row from the result of a query as scalar value

    :returns: single row from the query results as a scalar value
    :rtype: type of first field
    :raises pg.InvalidResultError: result does not have exactly one row
    :raises TypeError: too many (any) parameters
    :raises MemoryError: internal memory error

Returns the first field of a single row from the result as a scalar value.

This method returns the same single row as scalar when called multiple
times. It raises an :exc:`pg.InvalidResultError` if the result does not
have exactly one row. More specifically, this will be of type
:exc:`pg.NoResultError` if it is empty and of type
:exc:`pg.MultipleResultsError` if it has multiple rows.

.. versionadded:: 5.1

listfields -- list field names of query result
----------------------------------------------

.. method:: Query.listfields()

    List field names of query result

    :returns: field names
    :rtype: tuple
    :raises TypeError: too many parameters

This method returns the tuple of field names defined for the query result.
The fields are in the same order as the result values.

fieldname, fieldnum -- field name/number conversion
---------------------------------------------------

.. method:: Query.fieldname(num)

    Get field name from its number

    :param int num: field number
    :returns: field name
    :rtype: str
    :raises TypeError: invalid connection, bad parameter type,
        or too many parameters
    :raises ValueError: invalid field number

This method allows you to find a field name from its rank number. It can
be useful for displaying a result. The fields are in the same order as the
result values.
.. method:: Query.fieldnum(name)

    Get field number from its name

    :param str name: field name
    :returns: field number
    :rtype: int
    :raises TypeError: invalid connection, bad parameter type,
        or too many parameters
    :raises ValueError: unknown field name

This method returns a field number given its name. It can be used to build
a function that converts result list strings to their correct type, using
a hardcoded table definition. The number returned is the field rank in the
query result.

fieldinfo -- detailed info about query result fields
----------------------------------------------------

.. method:: Query.fieldinfo([field])

    Get information on one or all fields of the query

    :param field: a column number or name (optional)
    :type field: int or str
    :returns: field info tuple(s) for all fields or given field
    :rtype: tuple
    :raises IndexError: field does not exist
    :raises TypeError: too many parameters

If the ``field`` is specified by passing either a column number or a field
name, a four-tuple with information for the specified field of the query
result will be returned. If no ``field`` is specified, a tuple of
four-tuples for every field of the previous query result will be returned,
in the same order as they appear in the query result.

The four-tuples contain the following information: The field name, the
internal OID number of the field type, the size in bytes of the column or
a negative value if it is of variable size, and a type-specific modifier
value.

.. versionadded:: 5.2

memsize -- return number of bytes allocated by query result
-----------------------------------------------------------

.. method:: Query.memsize()

    Return number of bytes allocated by query result

    :returns: number of bytes allocated for the query result
    :rtype: int
    :raises TypeError: Too many arguments.

This method returns the number of bytes allocated for the query result.

.. versionadded:: 5.2 (needs PostgreSQL >= 12)

PyGreSQL-PyGreSQL-166b135/docs/contents/pgdb/000077500000000000000000000000001450706350600205045ustar00rootroot00000000000000PyGreSQL-PyGreSQL-166b135/docs/contents/pgdb/adaptation.rst000066400000000000000000000352241450706350600233700ustar00rootroot00000000000000Remarks on Adaptation and Typecasting
=====================================

.. currentmodule:: pgdb

Both PostgreSQL and Python have the concept of data types, but there
are of course differences between the two type systems. Therefore PyGreSQL
needs to adapt Python objects to the representation required by PostgreSQL
when passing values as query parameters, and it needs to typecast the
representation of PostgreSQL data types returned by database queries to
Python objects. Here are some explanations about how this works in detail
in case you want to better understand or change the default behavior of
PyGreSQL.

Supported data types
--------------------

The following automatic data type conversions are supported by PyGreSQL
out of the box. If you need other automatic type conversions or want to
change the default conversions, you can achieve this by using the methods
explained in the next two sections.
================================== ==================
PostgreSQL                         Python
================================== ==================
char, bpchar, name, text, varchar  str
bool                               bool
bytea                              bytes
int2, int4, int8, oid, serial      int
int2vector                         list of int
float4, float8                     float
numeric, money                     Decimal
date                               datetime.date
time, timetz                       datetime.time
timestamp, timestamptz             datetime.datetime
interval                           datetime.timedelta
hstore                             dict
json, jsonb                        list or dict
uuid                               uuid.UUID
array                              list [#array]_
record                             tuple
================================== ==================

.. note::

    Elements of arrays and records will also be converted accordingly.

.. [#array] The first element of the array will always be the first
    element of the Python list, no matter what the lower bound of the
    PostgreSQL array is. The information about the start index of the
    array (which is usually 1 in PostgreSQL, but can also be different
    from 1) is ignored and gets lost in the conversion to the Python list.
    If you need that information, you can request it separately with
    the `array_lower()` function provided by PostgreSQL.

Adaptation of parameters
------------------------

PyGreSQL knows how to adapt the common Python types to get a suitable
representation of their values for PostgreSQL when you pass parameters
to a query. For example::

    >>> con = pgdb.connect(...)
    >>> cur = con.cursor()

    >>> parameters = (144, 3.75, 'hello', None)
    >>> tuple(cur.execute('SELECT %s, %s, %s, %s', parameters).fetchone())
    (144, Decimal('3.75'), 'hello', None)

This is the result we can expect, so obviously PyGreSQL has adapted the
parameters and sent the following query to PostgreSQL:

.. code-block:: sql

    SELECT 144, 3.75, 'hello', NULL

Note the subtle, but important detail that even though the SQL string
passed to :meth:`cur.execute` contains conversion specifications normally
used in Python with the ``%`` operator for formatting strings, we didn't
use the ``%`` operator to format the parameters, but passed them as the
second argument to :meth:`cur.execute`. I.e. we **didn't** write the
following::

    >>> tuple(cur.execute('SELECT %s, %s, %s, %s' % parameters).fetchone())

If we had done this, PostgreSQL would have complained because the
parameters were not adapted. Particularly, there would be no quotes around
the value ``'hello'``, so PostgreSQL would have interpreted this as a
database column, which would have caused a :exc:`ProgrammingError`. Also,
the Python value ``None`` would have been included in the SQL command
literally, instead of being converted to the SQL keyword ``NULL``, which
would have been another reason for PostgreSQL to complain about our bad
query:

.. code-block:: sql

    SELECT 144, 3.75, hello, None

Even worse, building queries with the use of the ``%`` operator makes us
vulnerable to so called "SQL injection" exploits, where an attacker inserts
malicious SQL statements into our queries that we never intended to be
executed. We could avoid this by carefully quoting and escaping the
parameters, but this would be tedious and if we overlook something, our
code will still be vulnerable. So please don't do this. This cannot be
emphasized enough, because it is such a subtle difference and using the
``%`` operator looks so natural:

.. warning::

    Remember to **never** insert parameters directly into your queries
    using the ``%`` operator. Always pass the parameters separately.

The good thing is that by letting PyGreSQL do the work for you, you can
treat all your parameters equally and don't need to ponder where you need
to put quotes or need to escape strings.
You can and should also always use the general ``%s`` specification
instead of e.g. using ``%d`` for integers. Actually, to avoid mistakes
and make it easier to insert parameters at more than one location, you
can and should use named specifications, like this::

    >>> params = dict(greeting='Hello', name='HAL')
    >>> sql = """SELECT %(greeting)s || ', ' || %(name)s
    ...     || '. Do you read me, ' || %(name)s || '?'"""
    >>> cur.execute(sql, params).fetchone()[0]
    'Hello, HAL. Do you read me, HAL?'

PyGreSQL does not only adapt the basic types like ``int``, ``float``,
``bool`` and ``str``, but also tries to make sense of Python lists and
tuples.

Lists are adapted as PostgreSQL arrays::

    >>> params = dict(array=[[1, 2],[3, 4]])
    >>> cur.execute("SELECT %(array)s", params).fetchone()[0]
    [[1, 2], [3, 4]]

Note that the query gives the value back as Python lists again. This is
achieved by the typecasting mechanism explained in the next section.
The query that was actually executed was this:

.. code-block:: sql

    SELECT ARRAY[[1,2],[3,4]]

Again, if we had inserted the list using the ``%`` operator without
adaptation, the ``ARRAY`` keyword would have been missing in the query.

Tuples are adapted as PostgreSQL composite types::

    >>> params = dict(record=('Bond', 'James'))
    >>> cur.execute("SELECT %(record)s", params).fetchone()[0]
    ('Bond', 'James')

You can also use this feature with the ``IN`` syntax of SQL::

    >>> params = dict(what='needle', where=('needle', 'haystack'))
    >>> cur.execute("SELECT %(what)s IN %(where)s", params).fetchone()[0]
    True

Sometimes a Python type can be ambiguous. For instance, you might want
to insert a Python list not into an array column, but into a JSON column.
Or you want to interpret a string as a date and insert it into a DATE
column. In this case you can give PyGreSQL a hint by using
:ref:`type_constructors`::

    >>> cur.execute("CREATE TABLE json_data (data json, created date)")
    >>> params = dict(
    ...     data=pgdb.Json([1, 2, 3]), created=pgdb.Date(2016, 1, 29))
    >>> sql = ("INSERT INTO json_data VALUES (%(data)s, %(created)s)")
    >>> cur.execute(sql, params)
    >>> cur.execute("SELECT * FROM json_data").fetchone()
    Row(data=[1, 2, 3], created='2016-01-29')

Let's think of another example where we create a table with a composite
type in PostgreSQL:

.. code-block:: sql

    CREATE TABLE on_hand (
        item  inventory_item,
        count integer)

We assume the composite type ``inventory_item`` has been created like this:

.. code-block:: sql

    CREATE TYPE inventory_item AS (
        name        text,
        supplier_id integer,
        price       numeric)

In Python we can use a named tuple as an equivalent to this PostgreSQL
type::

    >>> from collections import namedtuple
    >>> inventory_item = namedtuple(
    ...     'inventory_item', ['name', 'supplier_id', 'price'])

Using the automatic adaptation of Python tuples, an item can now be
inserted into the database and then read back as follows::

    >>> cur.execute("INSERT INTO on_hand VALUES (%(item)s, %(count)s)",
    ...     dict(item=inventory_item('fuzzy dice', 42, 1.99), count=1000))
    >>> cur.execute("SELECT * FROM on_hand").fetchone()
    Row(item=inventory_item(name='fuzzy dice', supplier_id=42,
        price=Decimal('1.99')), count=1000)

However, we may not want to use named tuples, but custom Python classes
to hold our values, like this one::

    >>> class InventoryItem:
    ...
    ...     def __init__(self, name, supplier_id, price):
    ...         self.name = name
    ...         self.supplier_id = supplier_id
    ...         self.price = price
    ...
    ...     def __str__(self):
    ...         return '{} (from {}, at ${})'.format(
    ...             self.name, self.supplier_id, self.price)
But when we try to insert an instance of this class in the same way, we
will get an error::

    >>> cur.execute("INSERT INTO on_hand VALUES (%(item)s, %(count)s)",
    ...     dict(item=InventoryItem('fuzzy dice', 42, 1.99), count=1000))
    InterfaceError: Do not know how to adapt type

While PyGreSQL knows how to adapt tuples, it does not know what to make
out of our custom class. To simply convert the object to a string using
the ``str`` function is not a solution, since this yields a human readable
string that is not useful for PostgreSQL. However, it is possible to make
such custom classes adapt themselves to PostgreSQL by adding a "magic"
method with the name ``__pg_repr__``, like this::

    >>> class InventoryItem:
    ...
    ...     ...
    ...
    ...     def __str__(self):
    ...         return '{} (from {}, at ${})'.format(
    ...             self.name, self.supplier_id, self.price)
    ...
    ...     def __pg_repr__(self):
    ...         return (self.name, self.supplier_id, self.price)

Now you can insert class instances the same way as you insert named tuples.

Note that PyGreSQL adapts the result of ``__pg_repr__`` again if it is a
tuple or a list. Otherwise, it must be a properly escaped string.

Typecasting to Python
---------------------

As you noticed, PyGreSQL automatically converted the PostgreSQL data to
suitable Python objects when returning values via one of the "fetch"
methods of a cursor. This is done by the use of built-in typecast
functions.

If you want to use different typecast functions or add your own if no
built-in typecast function is available, then this is possible using
the :func:`set_typecast` function. With the :func:`get_typecast` function
you can check which function is currently set, and :func:`reset_typecast`
allows you to reset the typecast function to its default. If no typecast
function is set, then PyGreSQL will return the raw strings from the
database.

For instance, you will find that PyGreSQL uses the normal ``int`` function
to cast PostgreSQL ``int4`` type values to Python::

    >>> pgdb.get_typecast('int4')
    int

You can change this to return float values instead::

    >>> pgdb.set_typecast('int4', float)
    >>> con = pgdb.connect(...)
    >>> cur = con.cursor()
    >>> cur.execute('select 42::int4').fetchone()[0]
    42.0

Note that the connections cache the typecast functions, so you may need to
reopen the database connection, or reset the cache of the connection to
make this effective, using the following command::

    >>> con.type_cache.reset_typecast()

The :class:`TypeCache` of the connection can also be used to change
typecast functions locally for one database connection only.

As a more useful example, we can create a typecast function that casts
items of the composite type used as example in the previous section
to instances of the corresponding Python class::

    >>> con.type_cache.reset_typecast()
    >>> cast_tuple = con.type_cache.get_typecast('inventory_item')
    >>> cast_item = lambda value: InventoryItem(*cast_tuple(value))
    >>> con.type_cache.set_typecast('inventory_item', cast_item)
    >>> str(cur.execute("SELECT * FROM on_hand").fetchone()[0])
    'fuzzy dice (from 42, at $1.99)'
As you saw in the last section, PyGreSQL also has a typecast function for
JSON, which is the default JSON decoder from the standard library. Let's
assume we want to use a slight variation of that decoder in which every
integer in JSON is converted to a float in Python. This can be
accomplished as follows::

    >>> from json import loads
    >>> cast_json = lambda v: loads(v, parse_int=float)
    >>> pgdb.set_typecast('json', cast_json)
    >>> cur.execute("SELECT data FROM json_data").fetchone()[0]
    [1.0, 2.0, 3.0]

Note again that you may need to run ``con.type_cache.reset_typecast()``
to make this effective. Also note that the two types ``json`` and ``jsonb``
have their own typecast functions, so if you use ``jsonb`` instead of
``json``, you need to use this type name when setting the typecast
function::

    >>> pgdb.set_typecast('jsonb', cast_json)

As one last example, let us try to typecast the geometric data type
``circle`` of PostgreSQL into a `SymPy <https://www.sympy.org>`_ ``Circle``
object. Let's assume we have created and populated a table with two
circles, like so:

.. code-block:: sql

    CREATE TABLE circle (
        name varchar(8) primary key,
        circle circle);
    INSERT INTO circle VALUES ('C1', '<(2, 3), 3>');
    INSERT INTO circle VALUES ('C2', '<(1, -1), 4>');

With PostgreSQL we can easily calculate that these two circles overlap::

    >>> con.cursor().execute("""SELECT c1.circle && c2.circle
    ...     FROM circle c1, circle c2
    ...     WHERE c1.name = 'C1' AND c2.name = 'C2'""").fetchone()[0]
    True

However, calculating the intersection points between the two circles using
the ``#`` operator does not work (at least not as of PostgreSQL version
9.5). So let's resort to SymPy to find out. To ease importing circles from
PostgreSQL to SymPy, we create and register the following typecast
function::

    >>> from sympy import Point, Circle
    >>>
    >>> def cast_circle(s):
    ...     p, r = s[1:-1].rsplit(',', 1)
    ...     p = p[1:-1].split(',')
    ...     return Circle(Point(float(p[0]), float(p[1])), float(r))
    ...
    >>> pgdb.set_typecast('circle', cast_circle)

Now we can import the circles in the table into Python quite easily::

    >>> circle = {c.name: c.circle for c in con.cursor().execute(
    ...     "SELECT * FROM circle").fetchall()}

The result is a dictionary mapping circle names to SymPy ``Circle``
objects. We can verify that the circles have been imported correctly::

    >>> circle
    {'C1': Circle(Point(2, 3), 3.0), 'C2': Circle(Point(1, -1), 4.0)}

Finally we can find the exact intersection points with SymPy::

    >>> circle['C1'].intersection(circle['C2'])
    [Point(29/17 + 64564173230121*sqrt(17)/100000000000000,
        -80705216537651*sqrt(17)/500000000000000 + 31/17),
     Point(-64564173230121*sqrt(17)/100000000000000 + 29/17,
        80705216537651*sqrt(17)/500000000000000 + 31/17)]

PyGreSQL-PyGreSQL-166b135/docs/contents/pgdb/connection.rst000066400000000000000000000074121450706350600234010ustar00rootroot00000000000000Connection -- The connection object
===================================

.. currentmodule:: pgdb

.. class:: Connection

These connection objects respond to the following methods.

Note that ``pgdb.Connection`` objects also implement the context manager
protocol, i.e. you can use them in a ``with`` statement. When the ``with``
block ends, the current transaction will be automatically committed or
rolled back if there was an exception, and you won't need to do this
manually.

close -- close the connection
-----------------------------

.. method:: Connection.close()

    Close the connection now (rather than whenever it is deleted)

    :rtype: None

The connection will be unusable from this point forward; an :exc:`Error`
(or subclass) exception will be raised if any operation is attempted with
the connection. The same applies to all cursor objects trying to use the
connection.
Note that closing a connection without committing the changes first will
cause an implicit rollback to be performed.

commit -- commit the connection
-------------------------------

.. method:: Connection.commit()

    Commit any pending transaction to the database

    :rtype: None

Note that connections always use a transaction, unless you set the
:attr:`Connection.autocommit` attribute described below.

rollback -- roll back the connection
------------------------------------

.. method:: Connection.rollback()

    Roll back any pending transaction to the database

    :rtype: None

This method causes the database to roll back to the start of any pending
transaction. Closing a connection without committing the changes first
will cause an implicit rollback to be performed.

cursor -- return a new cursor object
------------------------------------

.. method:: Connection.cursor()

    Return a new cursor object using the connection

    :returns: a cursor object
    :rtype: :class:`Cursor`

This method returns a new :class:`Cursor` object that can be used to
operate on the database in the way described in the next section.

Attributes that are not part of the standard
--------------------------------------------

.. note::

    The following attributes are not part of the DB-API 2 standard.

.. attribute:: Connection.closed

    This is *True* if the connection has been closed or has become invalid

.. attribute:: Connection.cursor_type

    The default cursor type used by the connection

If you want to use your own custom subclass of the :class:`Cursor` class
with the connection, set this attribute to your custom cursor class. You
will then get your custom cursor whenever you call
:meth:`Connection.cursor`.

.. versionadded:: 5.0

.. attribute:: Connection.type_cache

    A dictionary with the various type codes for the PostgreSQL types

This can be used for getting more information on the PostgreSQL database
types or changing the typecast functions used for the connection. See the
description of the :class:`TypeCache` class for details.

.. versionadded:: 5.0

.. attribute:: Connection.autocommit

    A read/write attribute to get/set the autocommit mode

Normally, all DB-API 2 SQL commands are run inside a transaction. Sometimes
this behavior is not desired; there are also some SQL commands such as
VACUUM which cannot be run inside a transaction.

By setting this attribute to ``True`` you can change this behavior so that
no transactions will be started for that connection. In this case every
executed SQL command has immediate effect on the database and you don't
need to call :meth:`Connection.commit` explicitly. In this mode, you can
still use ``with con:`` blocks to run parts of the code using the
connection ``con`` inside a transaction.

By default, this attribute is set to ``False`` which conforms to the
behavior specified by the DB-API 2 standard (manual commit required).

.. versionadded:: 5.1
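As a brief sketch of these features in combination (the connection
parameters and the ``greetings`` table are placeholders for this
example)::

    import pgdb

    con = pgdb.connect(database='test', user='test')

    # run one transaction; commits on success, rolls back on an exception
    with con:
        con.cursor().execute("INSERT INTO greetings VALUES ('hello')")

    con.autocommit = True  # needed e.g. for statements like VACUUM
    con.cursor().execute("VACUUM greetings")
    con.close()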
PyGreSQL-PyGreSQL-166b135/docs/contents/pgdb/cursor.rst000066400000000000000000000366371450706350600225660ustar00rootroot00000000000000Cursor -- The cursor object
===========================

.. currentmodule:: pgdb

.. class:: Cursor

These objects represent a database cursor, which is used to manage the
context of a fetch operation. Cursors created from the same connection are
not isolated, i.e., any changes done to the database by a cursor are
immediately visible by the other cursors. Cursors created from different
connections may or may not be isolated, depending on the level of
transaction isolation. The default PostgreSQL transaction isolation level
is "read committed".

Cursor objects respond to the following methods and attributes.

Note that ``Cursor`` objects also implement both the iterator and the
context manager protocol, i.e. you can iterate over them and you can use
them in a ``with`` statement.

description -- details regarding the result columns
---------------------------------------------------

.. attribute:: Cursor.description

    This read-only attribute is a sequence of 7-item named tuples.

    Each of these named tuples contains information describing
    one result column:

    - *name*
    - *type_code*
    - *display_size*
    - *internal_size*
    - *precision*
    - *scale*
    - *null_ok*

    The values for *precision* and *scale* are only set for numeric types.
    The values for *display_size* and *null_ok* are always ``None``.

    This attribute will be ``None`` for operations that do not return rows
    or if the cursor has not had an operation invoked via the
    :meth:`Cursor.execute` or :meth:`Cursor.executemany` method yet.

.. versionchanged:: 5.0
    Before version 5.0, this attribute was an ordinary tuple.

rowcount -- number of rows of the result
----------------------------------------

.. attribute:: Cursor.rowcount

    This read-only attribute specifies the number of rows that the last
    :meth:`Cursor.execute` or :meth:`Cursor.executemany` call produced
    (for DQL statements like SELECT) or affected (for DML statements like
    UPDATE or INSERT). It is also set by the :meth:`Cursor.copy_from` and
    :meth:`Cursor.copy_to` methods. The attribute is -1 in case no such
    method call has been performed on the cursor or the rowcount of the
    last operation cannot be determined by the interface.

close -- close the cursor
-------------------------

.. method:: Cursor.close()

    Close the cursor now (rather than whenever it is deleted)

    :rtype: None

The cursor will be unusable from this point forward; an :exc:`Error`
(or subclass) exception will be raised if any operation is attempted
with the cursor.

execute -- execute a database operation
---------------------------------------

.. method:: Cursor.execute(operation, [parameters])

    Prepare and execute a database operation (query or command)

    :param str operation: the database operation
    :param parameters: a sequence or mapping of parameters
    :returns: the cursor, so you can chain commands

Parameters may be provided as sequence or mapping and will be bound to
variables in the operation. Variables are specified using Python extended
format codes, e.g. ``" ... WHERE name=%(name)s"``.

A reference to the operation will be retained by the cursor. If the same
operation object is passed in again, then the cursor can optimize its
behavior. This is most effective for algorithms where the same operation
is used, but different parameters are bound to it (many times).

The parameters may also be specified as list of tuples to e.g. insert
multiple rows in a single operation, but this kind of usage is deprecated:
:meth:`Cursor.executemany` should be used instead.

Note that in case this method raises a :exc:`DatabaseError`, you can get
information about the error condition that has occurred by introspecting
its :attr:`DatabaseError.sqlstate` attribute, which will be the
``SQLSTATE`` error code associated with the error. Applications that need
to know which error condition has occurred should usually test the error
code, rather than looking at the textual error message.
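For instance, here is a sketch of testing the ``SQLSTATE`` error code
instead of the message text (the ``employees`` table is a placeholder,
and ``'23505'`` is the standard code for a unique constraint violation)::

    try:
        cur.execute("INSERT INTO employees (name) VALUES (%(name)s)",
                    dict(name='HAL'))
    except pgdb.DatabaseError as error:
        if error.sqlstate == '23505':  # unique_violation
            print('this employee already exists')
        else:
            raise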
executemany -- execute many similar database operations
-------------------------------------------------------

.. method:: Cursor.executemany(operation, [seq_of_parameters])

    Prepare and execute many similar database operations
    (queries or commands)

    :param str operation: the database operation
    :param seq_of_parameters: a sequence or mapping of parameter tuples
        or mappings
    :returns: the cursor, so you can chain commands

Prepare a database operation (query or command) and then execute it against
all parameter tuples or mappings found in the sequence *seq_of_parameters*.

Parameters are bound to the query using Python extended format codes,
e.g. ``" ... WHERE name=%(name)s"``.

callproc -- Call a stored procedure
-----------------------------------

.. method:: Cursor.callproc(procname, [parameters])

    Call a stored database procedure with the given name

    :param str procname: the name of the database function
    :param parameters: a sequence of parameters (can be empty or omitted)

This method calls a stored procedure (function) in the PostgreSQL database.

The sequence of parameters must contain one entry for each input argument
that the function expects. The result of the call is the same as this
input sequence; replacement of output and input/output parameters in the
return value is currently not supported.

The function may also provide a result set as output. These can be
requested through the standard fetch methods of the cursor.

.. versionadded:: 5.0

fetchone -- fetch next row of the query result
----------------------------------------------

.. method:: Cursor.fetchone()

    Fetch the next row of a query result set

    :returns: the next row of the query result set
    :rtype: namedtuple or None

Fetch the next row of a query result set, returning a single named tuple,
or ``None`` when no more data is available. The field names of the named
tuple are the same as the column names of the database query as long as
they are valid Python identifiers.

An :exc:`Error` (or subclass) exception is raised if the previous call to
:meth:`Cursor.execute` or :meth:`Cursor.executemany` did not produce any
result set or no call was issued yet.

.. versionchanged:: 5.0
    Before version 5.0, this method returned ordinary tuples.

fetchmany -- fetch next set of rows of the query result
-------------------------------------------------------

.. method:: Cursor.fetchmany([size=None], [keep=False])

    Fetch the next set of rows of a query result

    :param size: the number of rows to be fetched
    :type size: int or None
    :param keep: if set to true, will keep the passed arraysize
    :type keep: bool
    :returns: the next set of rows of the query result
    :rtype: list of namedtuples

Fetch the next set of rows of a query result, returning a list of named
tuples. An empty sequence is returned when no more rows are available.
The field names of the named tuple are the same as the column names of
the database query as long as they are valid Python identifiers.

The number of rows to fetch per call is specified by the *size* parameter.
If it is not given, the cursor's :attr:`arraysize` determines the number
of rows to be fetched. If you set the *keep* parameter to true, this is
kept as new :attr:`arraysize`.

The method tries to fetch as many rows as indicated by the *size*
parameter. If this is not possible due to the specified number of rows not
being available, fewer rows may be returned.

An :exc:`Error` (or subclass) exception is raised if the previous call to
:meth:`Cursor.execute` or :meth:`Cursor.executemany` did not produce any
result set or no call was issued yet.

Note there are performance considerations involved with the *size*
parameter. For optimal performance, it is usually best to use the
:attr:`arraysize` attribute. If the *size* parameter is used, then it is
best for it to retain the same value from one :meth:`Cursor.fetchmany`
call to the next.

.. versionchanged:: 5.0
    Before version 5.0, this method returned ordinary tuples.
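The following sketch shows how a large result set can be processed in
batches with :meth:`Cursor.fetchmany` (assuming an existing cursor ``cur``
and an illustrative table ``big_table``)::

    cur.execute("SELECT * FROM big_table")
    while True:
        rows = cur.fetchmany(100)  # fetch the next 100 rows
        if not rows:
            break  # no more rows available
        for row in rows:
            print(row)  # stand-in for your own row handling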
fetchall -- fetch all rows of the query result
----------------------------------------------

.. method:: Cursor.fetchall()

    Fetch all (remaining) rows of a query result

    :returns: the set of all rows of the query result
    :rtype: list of namedtuples

Fetch all (remaining) rows of a query result, returning them as list of
named tuples. The field names of the named tuple are the same as the
column names of the database query as long as they are valid as field
names for named tuples, otherwise they are given positional names.

Note that the cursor's :attr:`arraysize` attribute can affect the
performance of this operation.

.. versionchanged:: 5.0
    Before version 5.0, this method returned ordinary tuples.

arraysize - the number of rows to fetch at a time
-------------------------------------------------

.. attribute:: Cursor.arraysize

    The number of rows to fetch at a time

This read/write attribute specifies the number of rows to fetch at a time
with :meth:`Cursor.fetchmany`. It defaults to 1, meaning to fetch a single
row at a time.

Methods and attributes that are not part of the standard
--------------------------------------------------------

.. note::

    The following methods and attributes are not part of the DB-API 2
    standard.

.. method:: Cursor.copy_from(stream, table, [format], [sep], [null], [size], [columns])

    Copy data from an input stream to the specified table

    :param stream: the input stream (must be a file-like object, a string
        or an iterable returning strings)
    :param str table: the name of a database table
    :param str format: the format of the data in the input stream,
        can be ``'text'`` (the default), ``'csv'``, or ``'binary'``
    :param str sep: a single character separator
        (the default is ``'\t'`` for text and ``','`` for csv)
    :param str null: the textual representation of the ``NULL`` value,
        can also be an empty string (the default is ``'\\N'``)
    :param int size: the size of the buffer when reading file-like objects
    :param list columns: an optional list of column names
    :returns: the cursor, so you can chain commands
    :raises TypeError: parameters with wrong types
    :raises ValueError: invalid parameters
    :raises IOError: error when executing the copy operation

This method can be used to copy data from an input stream on the client
side to a database table on the server side using the ``COPY FROM``
command. The input stream can be provided in form of a file-like object
(which must have a ``read()`` method), a string, or an iterable returning
one row or multiple rows of input data on each iteration.

The format must be text, csv or binary. The sep option sets the column
separator (delimiter) used in the non binary formats. The null option sets
the textual representation of ``NULL`` in the input.

The size option sets the size of the buffer used when reading data from
file-like objects.

The copy operation can be restricted to a subset of columns. If no columns
are specified, all of them will be copied.

.. versionadded:: 5.0
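A minimal sketch of loading rows from a CSV file with
:meth:`Cursor.copy_from` (the file name and table are illustrative
assumptions)::

    with open('fruits.csv') as stream:
        # bulk-load the CSV data into the table using COPY FROM
        cur.copy_from(stream, 'fruits', format='csv')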
.. method:: Cursor.copy_to(stream, table, [format], [sep], [null], [decode], [columns])

    Copy data from the specified table to an output stream

    :param stream: the output stream
        (must be a file-like object or ``None``)
    :param str table: the name of a database table or a ``SELECT`` query
    :param str format: the format of the data in the output stream,
        can be ``'text'`` (the default), ``'csv'``, or ``'binary'``
    :param str sep: a single character separator
        (the default is ``'\t'`` for text and ``','`` for csv)
    :param str null: the textual representation of the ``NULL`` value,
        can also be an empty string (the default is ``'\\N'``)
    :param bool decode: whether decoded strings shall be returned
        for non-binary formats (the default is ``True``)
    :param list columns: an optional list of column names
    :returns: a generator if stream is set to ``None``, otherwise the cursor
    :raises TypeError: parameters with wrong types
    :raises ValueError: invalid parameters
    :raises IOError: error when executing the copy operation

This method can be used to copy data from a database table on the server
side to an output stream on the client side using the ``COPY TO`` command.

The output stream can be provided in form of a file-like object (which
must have a ``write()`` method). Alternatively, if ``None`` is passed as
the output stream, the method will return a generator yielding one row of
output data on each iteration.

Output will be returned as decoded strings unless you set *decode* to
false, in which case byte strings are returned. Note that you can also use
a ``SELECT`` query instead of the table name.

The format must be text, csv or binary. The sep option sets the column
separator (delimiter) used in the non binary formats. The null option sets
the textual representation of ``NULL`` in the output.

The copy operation can be restricted to a subset of columns. If no columns
are specified, all of them will be copied.

.. versionadded:: 5.0
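A short sketch of dumping a table to a CSV file, and of iterating over the
rows as a generator (file name and query are illustrative assumptions)::

    # write the whole table to a CSV file
    with open('fruits_dump.csv', 'w') as stream:
        cur.copy_to(stream, 'fruits', format='csv')

    # or iterate over the rows produced by a SELECT query
    for line in cur.copy_to(None, 'select * from fruits'):
        print(line)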
.. method:: Cursor.row_factory(row)

    Process rows before they are returned

    :param list row: the currently processed row of the result set
    :returns: the transformed row that the fetch methods shall return

This method is used for processing result rows before returning them
through one of the fetch methods. By default, rows are returned as named
tuples. You can overwrite this method with a custom row factory if you
want to return the rows as different kinds of objects. This same row
factory will then be used for all result sets. If you overwrite this
method, the method :meth:`Cursor.build_row_factory` for creating row
factories dynamically will be ignored.

Note that named tuples are very efficient and can be easily converted to
dicts by calling ``row._asdict()``. If you still want to return rows as
dicts, you can create a custom cursor class like this::

    class DictCursor(pgdb.Cursor):

        def row_factory(self, row):
            return {key: value for key, value in zip(self.colnames, row)}

    cur = DictCursor(con)  # get one DictCursor instance or
    con.cursor_type = DictCursor  # always use DictCursor instances

.. versionadded:: 4.0

.. method:: Cursor.build_row_factory()

    Build a row factory based on the current description

    :returns: callable with the signature of :meth:`Cursor.row_factory`

This method returns row factories for creating named tuples. It is called
whenever a new result set is created, and :attr:`Cursor.row_factory` is
then assigned the return value of this method. You can overwrite this
method with a custom row factory builder if you want to use different row
factories for different result sets. Otherwise, you can also simply
overwrite the :meth:`Cursor.row_factory` method. This method will then be
ignored.

The default implementation that delivers rows as named tuples essentially
looks like this::

    def build_row_factory(self):
        return namedtuple('Row', self.colnames, rename=True)._make

.. versionadded:: 5.0

.. attribute:: Cursor.colnames

    The list of column names of the current result set

The values in this list are the same values as the *name* elements in the
:attr:`Cursor.description` attribute. Always use the latter if you want to
remain standard compliant.

.. versionadded:: 5.0

.. attribute:: Cursor.coltypes

    The list of column types of the current result set

The values in this list are the same values as the *type_code* elements in
the :attr:`Cursor.description` attribute. Always use the latter if you
want to remain standard compliant.

.. versionadded:: 5.0

PyGreSQL-PyGreSQL-166b135/docs/contents/pgdb/index.rst

----------------------------------------------
:mod:`pgdb` --- The DB-API Compliant Interface
----------------------------------------------

.. module:: pgdb

Contents
========

.. toctree::

    introduction
    module
    connection
    cursor
    types
    typecache
    adaptation

PyGreSQL-PyGreSQL-166b135/docs/contents/pgdb/introduction.rst

Introduction
============

You may either choose to use the "classic" PyGreSQL interface provided by
the :mod:`pg` module or else the newer DB-API 2.0 compliant interface
provided by the :mod:`pgdb` module.

The following part of the documentation covers only the newer :mod:`pgdb`
API.

**DB-API 2.0** (Python Database API Specification v2.0) is a specification
for connecting to databases (not only PostgreSQL) from Python that has
been developed by the Python DB-SIG in 1999. The authoritative programming
information for the DB-API is :pep:`0249`.

.. seealso::

    A useful tutorial-like `introduction to the DB-API `_ has been written
    by Andrew M. Kuchling for the LINUX Journal in 1998.

PyGreSQL-PyGreSQL-166b135/docs/contents/pgdb/module.rst

Module functions and constants
==============================

.. currentmodule:: pgdb

The :mod:`pgdb` module defines a :func:`connect` function that allows to
connect to a database, some global constants describing the capabilities
of the module as well as several exception classes.

connect -- Open a PostgreSQL connection
---------------------------------------

.. function:: connect([dsn], [user], [password], [host], [database], [**kwargs])

    Return a new connection to the database

    :param str dsn: data source name as string
    :param str user: the database user name
    :param str password: the database password
    :param str host: the hostname of the database
    :param database: the name of the database
    :param dict kwargs: other connection parameters
    :returns: a connection object
    :rtype: :class:`Connection`
    :raises pgdb.OperationalError: error connecting to the database

This function takes parameters specifying how to connect to a PostgreSQL
database and returns a :class:`Connection` object using these parameters.
If specified, the *dsn* parameter must be a string with the format
``'host:base:user:passwd:opt'``. All of the parts specified in the *dsn*
are optional. You can also specify the parameters individually using
keyword arguments, which always take precedence. The *host* can also
contain a port if specified in the format ``'host:port'``. In the *opt*
part of the *dsn* you can pass command-line options to the server. You can
pass additional connection parameters using the optional *kwargs* keyword
arguments.

Example::

    con = connect(dsn='myhost:mydb', user='guido', password='234$')

.. versionchanged:: 5.0.1
    Support for additional parameters passed as *kwargs*.
get/set/reset_typecast -- Control the global typecast functions
---------------------------------------------------------------

PyGreSQL uses typecast functions to cast the raw data coming from the
database to Python objects suitable for the particular database type.
These functions take a single string argument that represents the data
to be casted and must return the casted value.

PyGreSQL provides built-in typecast functions for the common database
types, but if you want to change these or add more typecast functions,
you can set these up using the following functions.

.. note::

    The following functions are not part of the DB-API 2 standard.

.. method:: get_typecast(typ)

    Get the global cast function for the given database type

    :param str typ: PostgreSQL type name or type code
    :returns: the typecast function for the specified type
    :rtype: function or None

.. versionadded:: 5.0

.. method:: set_typecast(typ, cast)

    Set a global typecast function for the given database type(s)

    :param typ: PostgreSQL type name or type code, or list of such
    :type typ: str or list
    :param cast: the typecast function to be set for the specified type(s)
    :type cast: function

The typecast function must take one string object as argument and return a
Python object into which the PostgreSQL type shall be casted. If the
function takes another parameter named *connection*, then the current
database connection will also be passed to the typecast function. This may
sometimes be necessary to look up certain database settings.

.. versionadded:: 5.0

As of version 5.0.3 you can also use this method to change the typecasting
of PostgreSQL array types. You must run ``set_typecast('anyarray', cast)``
in order to do this. The ``cast`` method must take a string value and a
cast function for the base type and return the array converted to a Python
object. For instance, run ``set_typecast('anyarray', lambda v, c: v)`` to
switch off the casting of arrays completely, and always return them
encoded as strings.

.. method:: reset_typecast([typ])

    Reset the typecasts for the specified (or all) type(s) to their
    defaults

    :param str typ: PostgreSQL type name or type code, or list of such,
        or None to reset all typecast functions
    :type typ: str, list or None

.. versionadded:: 5.0

Note that database connections cache types and their cast functions using
connection specific :class:`TypeCache` objects. You can also get, set and
reset typecast functions on the connection level using the methods
:meth:`TypeCache.get_typecast`, :meth:`TypeCache.set_typecast` and
:meth:`TypeCache.reset_typecast` of the :attr:`Connection.type_cache`.
This will not affect other connections or future connections. In order to
be sure a global change is picked up by a running connection, you must
reopen it or call :meth:`TypeCache.reset_typecast` on the
:attr:`Connection.type_cache`.
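For example, to cast PostgreSQL ``numeric`` values to ``float`` instead of
:class:`decimal.Decimal` on a global level (a sketch of one possible use,
not a recommendation)::

    import pgdb

    pgdb.set_typecast('numeric', float)  # cast numeric to float from now on
    # ... connections opened now return floats for numeric columns ...
    pgdb.reset_typecast('numeric')  # restore the default behavior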
Module constants
----------------

.. data:: apilevel

    The string constant ``'2.0'``, stating that the module is DB-API 2.0
    level compliant.

.. data:: threadsafety

    The integer constant 1, stating that the module itself is thread-safe,
    but the connections are not thread-safe, and therefore must be
    protected with a lock if you want to use them from different threads.

.. data:: paramstyle

    The string constant ``pyformat``, stating that parameters should be
    passed using Python extended format codes, e.g.
    ``" ... WHERE name=%(name)s"``.

Errors raised by this module
----------------------------

The errors that can be raised by the :mod:`pgdb` module are the following:

.. exception:: Warning

    Exception raised for important warnings like data truncations while
    inserting.

.. exception:: Error

    Exception that is the base class of all other error exceptions. You
    can use this to catch all errors with one single except statement.
    Warnings are not considered errors and thus do not use this class as
    base.

.. exception:: InterfaceError

    Exception raised for errors that are related to the database interface
    rather than the database itself.

.. exception:: DatabaseError

    Exception raised for errors that are related to the database.

    In PyGreSQL, this also has a :attr:`DatabaseError.sqlstate` attribute
    that contains the ``SQLSTATE`` error code of this error.

.. exception:: DataError

    Exception raised for errors that are due to problems with the
    processed data like division by zero or numeric value out of range.

.. exception:: OperationalError

    Exception raised for errors that are related to the database's
    operation and not necessarily under the control of the programmer,
    e.g. an unexpected disconnect occurs, the data source name is not
    found, a transaction could not be processed, or a memory allocation
    error occurred during processing.

.. exception:: IntegrityError

    Exception raised when the relational integrity of the database is
    affected, e.g. a foreign key check fails.

.. exception:: ProgrammingError

    Exception raised for programming errors, e.g. table not found or
    already exists, syntax error in the SQL statement or wrong number of
    parameters specified.

.. exception:: NotSupportedError

    Exception raised in case a method or database API was used which is
    not supported by the database.
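The following sketch shows how an application might inspect the
``SQLSTATE`` code instead of the error message (the cursor and table are
illustrative assumptions)::

    try:
        cur.execute("insert into fruits (id, name) values (1, 'apple')")
    except pgdb.DatabaseError as error:
        if error.sqlstate == '23505':  # unique_violation
            print('this row already exists')
        else:
            raise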
PyGreSQL-PyGreSQL-166b135/docs/contents/pgdb/typecache.rst

TypeCache -- The internal cache for database types
==================================================

.. currentmodule:: pgdb

.. class:: TypeCache

.. versionadded:: 5.0

The internal :class:`TypeCache` of PyGreSQL is not part of the DB-API 2
standard, but is documented here in case you need full control and
understanding of the internal handling of database types.

The TypeCache is essentially a dictionary mapping PostgreSQL internal type
names and type OIDs to DB-API 2 "type codes" (which are also returned as
the *type_code* field of the :attr:`Cursor.description` attribute).

These type codes are strings which are equal to the PostgreSQL internal
type name, but they also carry additional information about the associated
PostgreSQL type in the following attributes:

- *oid* -- the OID of the type
- *len*  -- the internal size
- *type*  -- ``'b'`` = base, ``'c'`` = composite, ...
- *category* -- ``'A'`` = Array, ``'B'`` = Boolean, ...
- *delim* -- delimiter to be used when parsing arrays
- *relid* -- the table OID for composite types

For details, see the PostgreSQL documentation on `pg_type `_.

In addition to the dictionary methods, the :class:`TypeCache` provides
the following methods:

.. method:: TypeCache.get_fields(typ)

    Get the names and types of the fields of composite types

    :param typ: PostgreSQL type name or OID of a composite type
    :type typ: str or int
    :returns: a list of pairs of field names and types
    :rtype: list

.. method:: TypeCache.get_typecast(typ)

    Get the cast function for the given database type

    :param str typ: PostgreSQL type name or type code
    :returns: the typecast function for the specified type
    :rtype: function or None

.. method:: TypeCache.set_typecast(typ, cast)

    Set a typecast function for the given database type(s)

    :param typ: PostgreSQL type name or type code, or list of such
    :type typ: str or list
    :param cast: the typecast function to be set for the specified type(s)
    :type cast: function

The typecast function must take one string object as argument and return a
Python object into which the PostgreSQL type shall be casted. If the
function takes another parameter named *connection*, then the current
database connection will also be passed to the typecast function. This may
sometimes be necessary to look up certain database settings.

.. method:: TypeCache.reset_typecast([typ])

    Reset the typecasts for the specified (or all) type(s) to their
    defaults

    :param str typ: PostgreSQL type name or type code, or list of such,
        or None to reset all typecast functions
    :type typ: str, list or None

.. method:: TypeCache.typecast(value, typ)

    Cast the given value according to the given database type

    :param str typ: PostgreSQL type name or type code
    :returns: the casted value

.. note::

    Note that the :class:`TypeCache` is always bound to a database
    connection. You can also get, set and reset typecast functions on a
    global level using the functions :func:`pgdb.get_typecast`,
    :func:`pgdb.set_typecast` and :func:`pgdb.reset_typecast`. If you do
    this, the current database connections will continue to use their
    already cached typecast functions unless you call the
    :meth:`TypeCache.reset_typecast` method on the
    :attr:`Connection.type_cache` objects of the running connections.
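A minimal sketch of changing a typecast function for a single running
connection only, via its type cache (assuming an existing connection
``con``)::

    con.type_cache.set_typecast('numeric', float)  # affects only this connection
    cur = con.cursor()
    cur.execute("select 1.5::numeric")
    value = cur.fetchone()[0]  # now a float instead of a Decimal
    con.type_cache.reset_typecast('numeric')  # back to the default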
PyGreSQL-PyGreSQL-166b135/docs/contents/pgdb/types.rst

Type -- Type objects and constructors
=====================================

.. currentmodule:: pgdb

.. _type_constructors:

Type constructors
-----------------

For binding to an operation's input parameters, PostgreSQL needs to have
the input in a particular format. However, from the parameters to the
:meth:`Cursor.execute` and :meth:`Cursor.executemany` methods it is not
always obvious as which PostgreSQL data types they shall be bound. For
instance, a Python string could be bound as a simple ``char`` value, or
also as a ``date`` or a ``time``. Or a list could be bound as an ``array``
or a ``json`` object. To make the intention clear in such cases, you can
wrap the parameters in type helper objects. PyGreSQL provides the
constructors defined below to create such objects that can hold special
values. When passed to the cursor methods, PyGreSQL can then detect the
proper type of the input parameter and bind it accordingly.

The :mod:`pgdb` module exports the following type constructors as part of
the DB-API 2 standard:

.. function:: Date(year, month, day)

    Construct an object holding a date value

.. function:: Time(hour, [minute], [second], [microsecond], [tzinfo])

    Construct an object holding a time value

.. function:: Timestamp(year, month, day, [hour], [minute], [second], [microsecond], [tzinfo])

    Construct an object holding a time stamp value

.. function:: DateFromTicks(ticks)

    Construct an object holding a date value from the given *ticks* value

.. function:: TimeFromTicks(ticks)

    Construct an object holding a time value from the given *ticks* value

.. function:: TimestampFromTicks(ticks)

    Construct an object holding a time stamp from the given *ticks* value

.. function:: Binary(bytes)

    Construct an object capable of holding a (long) binary string value

Additionally, PyGreSQL provides the following constructors for PostgreSQL
specific data types:

.. function:: Interval(days, hours=0, minutes=0, seconds=0, microseconds=0)

    Construct an object holding a time interval value

.. versionadded:: 5.0

.. function:: Uuid([hex], [bytes], [bytes_le], [fields], [int], [version])

    Construct an object holding a UUID value

.. versionadded:: 5.0

.. function:: Hstore(dict)

    Construct a wrapper for holding an hstore dictionary

.. versionadded:: 5.0

.. function:: Json(obj, [encode])

    Construct a wrapper for holding an object serializable to JSON

    You can pass an optional serialization function as a parameter.
    By default, PyGreSQL uses :func:`json.dumps` to serialize it.

.. function:: Literal(sql)

    Construct a wrapper for holding a literal SQL string

.. versionadded:: 5.0

Example for using a type constructor::

    >>> cursor.execute("create table jsondata (data jsonb)")
    >>> data = {'id': 1, 'name': 'John Doe', 'kids': ['Johnnie', 'Janie']}
    >>> cursor.execute("insert into jsondata values (%s)", [Json(data)])

.. note::

    SQL ``NULL`` values are always represented by the Python *None*
    singleton on input and output.
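A similar sketch for the PostgreSQL specific constructors (assuming a
table ``extradata`` with an ``hstore`` and a ``uuid`` column, used here
only for illustration)::

    >>> cursor.execute("create table extradata (tags hstore, token uuid)")
    >>> tags = Hstore({'color': 'red', 'size': 'XL'})
    >>> token = Uuid('12345678123456781234567812345678')
    >>> cursor.execute("insert into extradata values (%s, %s)",
    ...     (tags, token))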
.. _type_objects:

Type objects
------------

.. class:: DbType

The :attr:`Cursor.description` attribute returns information about each
of the result columns of a query. The *type_code* must compare equal to
one of the :class:`DbType` objects defined below. Type objects can be
equal to more than one type code (e.g. :class:`DATETIME` is equal to the
type codes for ``date``, ``time`` and ``timestamp`` columns).

The pgdb module exports the following :class:`DbType` objects as part of
the DB-API 2 standard:

.. object:: STRING

    Used to describe columns that are string-based (e.g. ``char``,
    ``varchar``, ``text``)

.. object:: BINARY

    Used to describe (long) binary columns (``bytea``)

.. object:: NUMBER

    Used to describe numeric columns (e.g. ``int``, ``float``,
    ``numeric``, ``money``)

.. object:: DATETIME

    Used to describe date/time columns (e.g. ``date``, ``time``,
    ``timestamp``, ``interval``)

.. object:: ROWID

    Used to describe the ``oid`` column of PostgreSQL database tables

.. note::

    The following more specific type objects are not part of the DB-API 2
    standard.

.. object:: BOOL

    Used to describe ``boolean`` columns

.. object:: SMALLINT

    Used to describe ``smallint`` columns

.. object:: INTEGER

    Used to describe ``integer`` columns

.. object:: LONG

    Used to describe ``bigint`` columns

.. object:: FLOAT

    Used to describe ``float`` columns

.. object:: NUMERIC

    Used to describe ``numeric`` columns

.. object:: MONEY

    Used to describe ``money`` columns

.. object:: DATE

    Used to describe ``date`` columns

.. object:: TIME

    Used to describe ``time`` columns

.. object:: TIMESTAMP

    Used to describe ``timestamp`` columns

.. object:: INTERVAL

    Used to describe date and time ``interval`` columns

.. object:: UUID

    Used to describe ``uuid`` columns

.. object:: HSTORE

    Used to describe ``hstore`` columns

.. versionadded:: 5.0

.. object:: JSON

    Used to describe ``json`` and ``jsonb`` columns

.. versionadded:: 5.0

.. object:: ARRAY

    Used to describe columns containing PostgreSQL arrays

.. versionadded:: 5.0

.. object:: RECORD

    Used to describe columns containing PostgreSQL records

.. versionadded:: 5.0

Example for using some type objects::

    >>> cursor = con.cursor()
    >>> cursor.execute("create table jsondata (created date, data jsonb)")
    >>> cursor.execute("select * from jsondata")
    >>> (created, data) = (d.type_code for d in cursor.description)
    >>> created == DATE
    True
    >>> created == DATETIME
    True
    >>> created == TIME
    False
    >>> data == JSON
    True
    >>> data == STRING
    False

PyGreSQL-PyGreSQL-166b135/docs/contents/postgres/advanced.rst

Examples for advanced features
==============================

.. currentmodule:: pg

In this section, we show how to use some advanced features of PostgreSQL
using the classic PyGreSQL interface.

We assume that you have already created a connection to the PostgreSQL
database, as explained in the :doc:`basic`::

    >>> from pg import DB
    >>> db = DB()
    >>> query = db.query

Inheritance
-----------

A table can inherit from zero or more tables. A query can reference either
all rows of a table or all rows of a table plus all of its descendants.

For example, the capitals table inherits from the cities table (it
inherits all data fields from cities). The tables need to be created
first, as sketched below.
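The ``CREATE TABLE`` statements themselves are not shown in this section;
the following is a plausible reconstruction inferred from the query output
below (the exact column types are assumptions)::

    >>> query("""CREATE TABLE cities (
    ...     name text,
    ...     population float8,
    ...     altitude int)""")
    >>> query("""CREATE TABLE capitals (
    ...     state varchar(2)) INHERITS (cities)""")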
'{{"meeting", "lunch"}, {"training", "presentation"}}')""") >>> query("""INSERT INTO sal_emp VALUES ( ... 'Carol', '{20000,25000,25000,25000}', ... '{9223372036854775807,9223372036854775807,9223372036854775807}', ... '{{"breakfast", "consulting"}, {"meeting", "lunch"}}')""") Queries on array attributes:: >>> query("""SELECT name FROM sal_emp WHERE ... sal_emp.pay_by_quarter[1] != sal_emp.pay_by_quarter[2]""") name ----- Carol (1 row) Retrieve third quarter pay of all employees:: >>> query("SELECT sal_emp.pay_by_quarter[3] FROM sal_emp") pay_by_quarter -------------- 10000 25000 (2 rows) Retrieve third quarter extra pay of all employees:: >>> query("SELECT sal_emp.pay_by_extra_quarter[3] FROM sal_emp") pay_by_extra_quarter -------------------- 9223372036854775800 9223372036854775807 (2 rows) Retrieve first two quarters of extra quarter pay of all employees:: >>> query("SELECT sal_emp.pay_by_extra_quarter[1:2] FROM sal_emp") pay_by_extra_quarter ----------------------------------------- {9223372036854775800,9223372036854775800} {9223372036854775807,9223372036854775807} (2 rows) Select subarrays:: >>> query("""SELECT sal_emp.schedule[1:2][1:1] FROM sal_emp ... WHERE sal_emp.name = 'Bill'""") schedule ---------------------- {{meeting},{training}} (1 row) PyGreSQL-PyGreSQL-166b135/docs/contents/postgres/basic.rst000066400000000000000000000301751450706350600232570ustar00rootroot00000000000000Basic examples ============== .. currentmodule:: pg In this section, we demonstrate how to use some of the very basic features of PostgreSQL using the classic PyGreSQL interface. Creating a connection to the database ------------------------------------- We start by creating a **connection** to the PostgreSQL database:: >>> from pg import DB >>> db = DB() If you pass no parameters when creating the :class:`DB` instance, then PyGreSQL will try to connect to the database on the local host that has the same name as the current user, and also use that name for login. You can also pass the database name, host, port and login information as parameters when creating the :class:`DB` instance:: >>> db = DB(dbname='testdb', host='pgserver', port=5432, ... user='scott', passwd='tiger') The :class:`DB` class of which ``db`` is an object is a wrapper around the lower level :class:`Connection` class of the :mod:`pg` module. The most important method of such connection objects is the ``query`` method that allows you to send SQL commands to the database. Creating tables --------------- The first thing you would want to do in an empty database is creating a table. To do this, you need to send a **CREATE TABLE** command to the database. PostgreSQL has its own set of built-in types that can be used for the table columns. Let us create two tables "weather" and "cities":: >>> db.query("""CREATE TABLE weather ( ... city varchar(80), ... temp_lo int, temp_hi int, ... prcp float8, ... date date)""") >>> db.query("""CREATE TABLE cities ( ... name varchar(80), ... location point)""") .. note:: Keywords are case-insensitive but identifiers are case-sensitive. You can get a list of all tables in the database with:: >>> db.get_tables() ['public.cities', 'public.weather'] Insert data ----------- Now we want to fill our tables with data. An **INSERT** statement is used to insert a new row into a table. There are several ways you can specify what columns the data should go to. Let us insert a row into each of these tables. 
The simplest case is when the list of values corresponds to the order of
the columns specified in the CREATE TABLE command::

    >>> db.query("""INSERT INTO weather
    ...     VALUES ('San Francisco', 46, 50, 0.25, '11/27/1994')""")
    >>> db.query("""INSERT INTO cities
    ...     VALUES ('San Francisco', '(-194.0, 53.0)')""")

You can also specify the columns to which the values correspond. The
columns can be specified in any order. You may also omit any number of
columns, such as with unknown precipitation, below::

    >>> db.query("""INSERT INTO weather (date, city, temp_hi, temp_lo)
    ...     VALUES ('11/29/1994', 'Hayward', 54, 37)""")

If you get errors regarding the format of the date values, your database
is probably set to a different date style. In this case you must change
the date style like this::

    >>> db.query("set datestyle = MDY")

Instead of explicitly writing the INSERT statement and sending it to the
database with the :meth:`DB.query` method, you can also use the more
convenient :meth:`DB.insert` method that does the same under the hood::

    >>> db.insert('weather',
    ...     date='11/29/1994', city='Hayward', temp_hi=54, temp_lo=37)

And instead of using keyword parameters, you can also pass the values to
the :meth:`DB.insert` method in a single Python dictionary.

If you have a Python list with many rows that shall be used to fill
a database table quickly, you can use the :meth:`DB.inserttable` method,
as sketched below.
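For example (a minimal sketch with made-up rows; the values must match the
order of the table columns)::

    >>> rows = [('Seattle', 42, 55, 0.5, '11/27/1994'),
    ...     ('Boston', 30, 45, None, '11/27/1994')]
    >>> db.inserttable('weather', rows)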
Retrieving data
---------------

After having entered some data into our tables, let's see how we can get
the data out again. A **SELECT** statement is used for retrieving data.
The basic syntax is:

.. code-block:: psql

    SELECT columns FROM tables WHERE predicates

A simple one would be the following query::

    >>> q = db.query("SELECT * FROM weather")
    >>> print(q)
        city     |temp_lo|temp_hi|prcp|   date
    -------------+-------+-------+----+----------
    San Francisco|     46|     50|0.25|1994-11-27
    Hayward      |     37|     54|    |1994-11-29
    (2 rows)

You may also specify expressions in the target list. (The 'AS column'
specifies the column name of the result. It is optional.) ::

    >>> print(db.query("""SELECT city, (temp_hi+temp_lo)/2 AS temp_avg, date
    ...     FROM weather"""))
        city     |temp_avg|   date
    -------------+--------+----------
    San Francisco|      48|1994-11-27
    Hayward      |      45|1994-11-29
    (2 rows)

If you want to retrieve rows that satisfy a certain condition (i.e. a
restriction), specify the condition in a WHERE clause. The following
retrieves the weather of San Francisco on rainy days::

    >>> print(db.query("""SELECT * FROM weather
    ...     WHERE city = 'San Francisco' AND prcp > 0.0"""))
        city     |temp_lo|temp_hi|prcp|   date
    -------------+-------+-------+----+----------
    San Francisco|     46|     50|0.25|1994-11-27
    (1 row)

Here is a more complicated one. Duplicates are removed when DISTINCT is
specified. ORDER BY specifies the column to sort on. (Just to make sure
the following won't confuse you, DISTINCT and ORDER BY can be used
separately.) ::

    >>> print(db.query("SELECT DISTINCT city FROM weather ORDER BY city"))
        city
    -------------
    Hayward
    San Francisco
    (2 rows)

So far we have only printed the output of a SELECT query. The object that
is returned by the query is an instance of the :class:`Query` class that
can print itself in the nicely formatted way we saw above. But you can
also retrieve the results as a list of tuples, by using the
:meth:`Query.getresult` method::

    >>> from pprint import pprint
    >>> q = db.query("SELECT * FROM weather")
    >>> pprint(q.getresult())
    [('San Francisco', 46, 50, 0.25, '1994-11-27'),
     ('Hayward', 37, 54, None, '1994-11-29')]

Here we used pprint to print out the returned list in a nicely formatted
way.

If you want to retrieve the results as a list of dictionaries instead of
tuples, use the :meth:`Query.dictresult` method instead::

    >>> pprint(q.dictresult())
    [{'city': 'San Francisco',
      'date': '1994-11-27',
      'prcp': 0.25,
      'temp_hi': 50,
      'temp_lo': 46},
     {'city': 'Hayward',
      'date': '1994-11-29',
      'prcp': None,
      'temp_hi': 54,
      'temp_lo': 37}]

Finally, you can also retrieve the results as a list of named tuples,
using the :meth:`Query.namedresult` method. This can be a good compromise
between simple tuples and the more memory intensive dictionaries::

    >>> for row in q.namedresult():
    ...     print(row.city, row.date)
    ...
    San Francisco 1994-11-27
    Hayward 1994-11-29

If you only want to retrieve a single row of data, you can use the more
convenient :meth:`DB.get` method that does the same under the hood::

    >>> d = dict(city='Hayward')
    >>> db.get('weather', d, 'city')
    >>> pprint(d)
    {'city': 'Hayward',
     'date': '1994-11-29',
     'prcp': None,
     'temp_hi': 54,
     'temp_lo': 37}

As you see, the :meth:`DB.get` method returns a dictionary with the column
names as keys. In the third parameter you can specify which column should
be looked up in the WHERE statement of the SELECT statement that is
executed by the :meth:`DB.get` method. You normally don't need it when the
table was created with a primary key.

Retrieving data into other tables
---------------------------------

A SELECT ... INTO statement can be used to retrieve data into another
table::

    >>> db.query("""SELECT * INTO TEMPORARY TABLE temptab FROM weather
    ...     WHERE city = 'San Francisco' and prcp > 0.0""")

This fills a temporary table "temptab" with a subset of the data in the
original "weather" table. It can be listed with::

    >>> print(db.query("SELECT * from temptab"))
        city     |temp_lo|temp_hi|prcp|   date
    -------------+-------+-------+----+----------
    San Francisco|     46|     50|0.25|1994-11-27
    (1 row)

Aggregates
----------

Let's try the following query::

    >>> print(db.query("SELECT max(temp_lo) FROM weather"))
    max
    ---
     46
    (1 row)

You can also use aggregates with the GROUP BY clause::

    >>> print(db.query("SELECT city, max(temp_lo) FROM weather GROUP BY city"))
        city     |max
    -------------+---
    Hayward      | 37
    San Francisco| 46
    (2 rows)

Joining tables
--------------

Queries can access multiple tables at once or access the same table in
such a way that multiple instances of the table are being processed at the
same time. Suppose we want to find all the records that are in the
temperature range of other records. W1 and W2 are aliases for weather.
We can use the following query to achieve that::

    >>> print(db.query("""SELECT W1.city, W1.temp_lo, W1.temp_hi,
    ...     W2.city, W2.temp_lo, W2.temp_hi FROM weather W1, weather W2
    ...     WHERE W1.temp_lo < W2.temp_lo and W1.temp_hi > W2.temp_hi"""))
     city  |temp_lo|temp_hi|    city     |temp_lo|temp_hi
    -------+-------+-------+-------------+-------+-------
    Hayward|     37|     54|San Francisco|     46|     50
    (1 row)

Now let's join two different tables. The following joins the "weather"
table and the "cities" table::

    >>> print(db.query("""SELECT city, location, prcp, date
    ...     FROM weather, cities
WHERE name = city""")) city |location |prcp| date -------------+---------+----+---------- San Francisco|(-194,53)|0.25|1994-11-27 (1 row) Since the column names are all different, we don't have to specify the table name. If you want to be clear, you can do the following. They give identical results, of course:: >>> print(db.query("""SELECT w.city, c.location, w.prcp, w.date ... FROM weather w, cities c WHERE c.name = w.city""")) city |location |prcp| date -------------+---------+----+---------- San Francisco|(-194,53)|0.25|1994-11-27 (1 row) Updating data ------------- It you want to change the data that has already been inserted into a database table, you will need the **UPDATE** statement. Suppose you discover the temperature readings are all off by 2 degrees as of Nov 28, you may update the data as follow:: >>> db.query("""UPDATE weather ... SET temp_hi = temp_hi - 2, temp_lo = temp_lo - 2 ... WHERE date > '11/28/1994'""") '1' >>> print(db.query("SELECT * from weather")) city |temp_lo|temp_hi|prcp| date -------------+-------+-------+----+---------- San Francisco| 46| 50|0.25|1994-11-27 Hayward | 35| 52| |1994-11-29 (2 rows) Note that the UPDATE statement returned the string ``'1'``, indicating that exactly one row of data has been affected by the update. If you retrieved one row of data as a dictionary using the :meth:`DB.get` method, then you can also update that row with the :meth:`DB.update` method. Deleting data ------------- To delete rows from a table, a **DELETE** statement can be used. Suppose you are no longer interested in the weather of Hayward, you can do the following to delete those rows from the table:: >>> db.query("DELETE FROM weather WHERE city = 'Hayward'") '1' Again, you get the string ``'1'`` as return value, indicating that exactly one row of data has been deleted. You can also delete all the rows in a table by doing the following. This is different from DROP TABLE which removes the table itself in addition to the removing the rows, as explained in the next section. :: >>> db.query("DELETE FROM weather") '1' >>> print(db.query("SELECT * from weather")) city|temp_lo|temp_hi|prcp|date ----+-------+-------+----+---- (0 rows) Since only one row was left in the table, the DELETE query again returns the string ``'1'``. The SELECT query now gives an empty result. If you retrieved a row of data as a dictionary using the :meth:`DB.get` method, then you can also delete that row with the :meth:`DB.delete` method. Removing the tables ------------------- The **DROP TABLE** command is used to remove tables. After you have done this, you can no longer use those tables:: >>> db.query("DROP TABLE weather, cities") >>> db.query("select * from weather") pg.ProgrammingError: Error: Relation "weather" does not exist PyGreSQL-PyGreSQL-166b135/docs/contents/postgres/func.rst000066400000000000000000000115131450706350600231240ustar00rootroot00000000000000Examples for using SQL functions ================================ .. currentmodule:: pg We assume that you have already created a connection to the PostgreSQL database, as explained in the :doc:`basic`:: >>> from pg import DB >>> db = DB() >>> query = db.query Creating SQL Functions on Base Types ------------------------------------ A **CREATE FUNCTION** statement lets you create a new function that can be used in expressions (in SELECT, INSERT, etc.). We will start with functions that return values of base types. Let's create a simple SQL function that takes no arguments and returns 1:: >>> query("""CREATE FUNCTION one() RETURNS int4 ... 
    ...     AS 'SELECT 1 as ONE' LANGUAGE SQL""")

Functions can be used in any expressions (e.g. in the target list or
qualifications)::

    >>> print(db.query("SELECT one() AS answer"))
    answer
    ------
         1
    (1 row)

Here's how you create a function that takes arguments. The following
function returns the sum of its two arguments::

    >>> query("""CREATE FUNCTION add_em(int4, int4) RETURNS int4
    ...     AS $$ SELECT $1 + $2 $$ LANGUAGE SQL""")
    >>> print(query("SELECT add_em(1, 2) AS answer"))
    answer
    ------
         3
    (1 row)

Creating SQL Functions on Composite Types
-----------------------------------------

It is also possible to create functions that return values of composite
types.

Before we create more sophisticated functions, let's populate an EMP
table::

    >>> query("""CREATE TABLE EMP (
    ...     name text,
    ...     salary int4,
    ...     age int4,
    ...     dept varchar(16))""")
    >>> emps = ["'Sam', 1200, 16, 'toy'",
    ...     "'Claire', 5000, 32, 'shoe'",
    ...     "'Andy', -1000, 2, 'candy'",
    ...     "'Bill', 4200, 36, 'shoe'",
    ...     "'Ginger', 4800, 30, 'candy'"]
    >>> for emp in emps:
    ...     query(f"INSERT INTO EMP VALUES ({emp})")

Every INSERT statement will return a '1' indicating that it has inserted
one row into the EMP table.

The argument of a function can also be a tuple. For instance,
*double_salary* takes a tuple of the EMP table::

    >>> query("""CREATE FUNCTION double_salary(EMP) RETURNS int4
    ...     AS $$ SELECT $1.salary * 2 AS salary $$ LANGUAGE SQL""")
    >>> print(query("""SELECT name, double_salary(EMP) AS dream
    ...     FROM EMP WHERE EMP.dept = 'toy'"""))
    name|dream
    ----+-----
    Sam | 2400
    (1 row)

The return value of a function can also be a tuple. However, make sure
that the expressions in the target list are in the same order as the
columns of EMP::

    >>> query("""CREATE FUNCTION new_emp() RETURNS EMP AS $$
    ...     SELECT 'None'::text AS name,
    ...         1000 AS salary,
    ...         25 AS age,
    ...         'None'::varchar(16) AS dept
    ...     $$ LANGUAGE SQL""")

You can then extract a column out of the resulting tuple by using the
"function notation" for projection columns (i.e. ``bar(foo)`` is
equivalent to ``foo.bar``). Note that ``new_emp().name`` isn't supported::

    >>> print(query("SELECT name(new_emp()) AS nobody"))
    nobody
    ------
    None
    (1 row)

Let's try one more function that returns tuples::

    >>> query("""CREATE FUNCTION high_pay() RETURNS setof EMP
    ...     AS 'SELECT * FROM EMP where salary > 1500'
    ...     LANGUAGE SQL""")
    >>> query("SELECT name(high_pay()) AS overpaid")
    overpaid
    --------
    Claire
    Bill
    Ginger
    (3 rows)

Creating SQL Functions with multiple SQL statements
---------------------------------------------------

You can also create functions that do more than just a SELECT.

You may have noticed that Andy has a negative salary. We'll create a
function that removes employees with negative salaries::

    >>> query("SELECT * FROM EMP")
    name  |salary|age|dept
    ------+------+---+-----
    Sam   |  1200| 16|toy
    Claire|  5000| 32|shoe
    Andy  | -1000|  2|candy
    Bill  |  4200| 36|shoe
    Ginger|  4800| 30|candy
    (5 rows)
    >>> query("""CREATE FUNCTION clean_EMP () RETURNS int4 AS
    ...     'DELETE FROM EMP WHERE EMP.salary < 0;
    ...     SELECT 1 AS ignore_this'
LANGUAGE SQL""") >>> query("SELECT clean_EMP()") clean_emp --------- 1 (1 row) >>> query("SELECT * FROM EMP") name |salary|age|dept ------+------+---+----- Sam | 1200| 16|toy Claire| 5000| 32|shoe Bill | 4200| 36|shoe Ginger| 4800| 30|candy (4 rows) Remove functions that were created in this example -------------------------------------------------- We can remove the functions that we have created in this example and the table EMP, by using the DROP command:: query("DROP FUNCTION clean_EMP()") query("DROP FUNCTION high_pay()") query("DROP FUNCTION new_emp()") query("DROP FUNCTION add_em(int4, int4)") query("DROP FUNCTION one()") query("DROP TABLE EMP CASCADE") PyGreSQL-PyGreSQL-166b135/docs/contents/postgres/index.rst000066400000000000000000000006351450706350600233030ustar00rootroot00000000000000------------------- A PostgreSQL Primer ------------------- The examples in this chapter of the documentation have been taken from the PostgreSQL manual. They demonstrate some PostgreSQL features using the classic PyGreSQL interface. They can serve as an introduction to PostgreSQL, but not so much as examples for the use of PyGreSQL. Contents ======== .. toctree:: basic advanced func syscat PyGreSQL-PyGreSQL-166b135/docs/contents/postgres/syscat.rst000066400000000000000000000110731450706350600235000ustar00rootroot00000000000000Examples for using the system catalogs ====================================== .. currentmodule:: pg The system catalogs are regular tables where PostgreSQL stores schema metadata, such as information about tables and columns, and internal bookkeeping information. You can drop and recreate the tables, add columns, insert and update values, and severely mess up your system that way. Normally, one should not change the system catalogs by hand: there are SQL commands to make all supported changes. For example, CREATE DATABASE inserts a row into the *pg_database* catalog — and actually creates the database on disk. It this section we want to show examples for how to parse some of the system catalogs, making queries with the classic PyGreSQL interface. 
We assume that you have already created a connection to the PostgreSQL
database, as explained in the :doc:`basic`::

    >>> from pg import DB
    >>> db = DB()
    >>> query = db.query

List indices
------------

This query lists all simple indices in the database::

    print(query("""SELECT bc.relname AS class_name,
            ic.relname AS index_name, a.attname
        FROM pg_class bc, pg_class ic, pg_index i, pg_attribute a
        WHERE i.indrelid = bc.oid AND i.indexrelid = ic.oid
            AND i.indkey[0] = a.attnum AND a.attrelid = bc.oid
            AND NOT a.attisdropped AND a.attnum>0
        ORDER BY class_name, index_name, attname"""))

List user defined attributes
----------------------------

This query lists all user-defined attributes and their types in
user-defined tables::

    print(query("""SELECT c.relname, a.attname,
            format_type(a.atttypid, a.atttypmod)
        FROM pg_class c, pg_attribute a
        WHERE c.relkind = 'r' AND c.relnamespace!=ALL(ARRAY[
                'pg_catalog','pg_toast',
                'information_schema']::regnamespace[])
            AND a.attnum > 0
            AND a.attrelid = c.oid
            AND NOT a.attisdropped
        ORDER BY relname, attname"""))

List user defined base types
----------------------------

This query lists all user defined base types::

    print(query("""SELECT r.rolname, t.typname
        FROM pg_type t, pg_authid r
        WHERE r.oid = t.typowner
            AND t.typrelid = '0'::oid and t.typelem = '0'::oid
            AND r.rolname != 'postgres'
        ORDER BY rolname, typname"""))

List operators
--------------

This query lists all right-unary operators::

    print(query("""SELECT o.oprname AS right_unary,
            lt.typname AS operand, result.typname AS return_type
        FROM pg_operator o, pg_type lt, pg_type result
        WHERE o.oprkind='r' and o.oprleft = lt.oid
            AND o.oprresult = result.oid
        ORDER BY operand"""))

This query lists all left-unary operators::

    print(query("""SELECT o.oprname AS left_unary,
            rt.typname AS operand, result.typname AS return_type
        FROM pg_operator o, pg_type rt, pg_type result
        WHERE o.oprkind='l' AND o.oprright = rt.oid
            AND o.oprresult = result.oid
        ORDER BY operand"""))

And this one lists all of the binary operators::

    print(query("""SELECT o.oprname AS binary_op,
            rt.typname AS right_opr, lt.typname AS left_opr,
            result.typname AS return_type
        FROM pg_operator o, pg_type rt, pg_type lt, pg_type result
        WHERE o.oprkind = 'b' AND o.oprright = rt.oid
            AND o.oprleft = lt.oid AND o.oprresult = result.oid"""))

List functions of a language
----------------------------

Given a programming language, this query returns the name, args and return
type from all functions of a language::

    language = 'sql'
    print(query("""SELECT p.proname, p.pronargs, t.typname
        FROM pg_proc p, pg_language l, pg_type t
        WHERE p.prolang = l.oid AND p.prorettype = t.oid
            AND l.lanname = $1
        ORDER BY proname""", (language,)))

List aggregate functions
------------------------

This query lists all of the aggregate functions and the type to which they
can be applied::

    print(query("""SELECT p.proname, t.typname
        FROM pg_aggregate a, pg_proc p, pg_type t
        WHERE a.aggfnoid = p.oid
            and p.proargtypes[0] = t.oid
        ORDER BY proname, typname"""))

List operator families
----------------------

The following query lists all defined operator families and all the
operators included in each family::

    print(query("""SELECT am.amname, opf.opfname, amop.amopopr::regoperator
        FROM pg_am am, pg_opfamily opf, pg_amop amop
        WHERE opf.opfmethod = am.oid AND amop.amopfamily = opf.oid
        ORDER BY amname, opfname, amopopr"""))

PyGreSQL-PyGreSQL-166b135/docs/contents/tutorial.rst

First Steps with PyGreSQL
=========================
In this small tutorial we show you the basic operations you can perform
with both flavors of the PyGreSQL interface. Please choose your flavor:

.. contents::
    :local:

First Steps with the classic PyGreSQL Interface
-----------------------------------------------

.. currentmodule:: pg

Before doing anything else, it's necessary to create a database
connection.

To do this, simply import the :class:`DB` wrapper class and create an
instance of it, passing the necessary connection parameters, like this::

    >>> from pg import DB
    >>> db = DB(dbname='testdb', host='pgserver', port=5432,
    ...     user='scott', passwd='tiger')

You can omit one or even all parameters if you want to use their default
values. PostgreSQL will use the name of the current operating system user
as the login and the database name, and will try to connect to the local
host on port 5432 if nothing else is specified.

The `db` object has all methods of the lower-level :class:`Connection`
class plus some more convenience methods provided by the :class:`DB`
wrapper.

You can now execute database queries using the :meth:`DB.query` method::

    >>> db.query("create table fruits(id serial primary key, name varchar)")

You can list all database tables with the :meth:`DB.get_tables` method::

    >>> db.get_tables()
    ['public.fruits']

To get the attributes of the *fruits* table, use :meth:`DB.get_attnames`::

    >>> db.get_attnames('fruits')
    {'id': 'int', 'name': 'text'}

Verify that you can insert into the newly created *fruits* table::

    >>> db.has_table_privilege('fruits', 'insert')
    True

You can insert a new row into the table using the :meth:`DB.insert`
method, for example::

    >>> db.insert('fruits', name='apple')
    {'name': 'apple', 'id': 1}

Note how this method returns the full row as a dictionary including its
*id* column that has been generated automatically by a database sequence.
You can also pass a dictionary to the :meth:`DB.insert` method instead of
or in addition to using keyword arguments.

Let's add another row to the table::

    >>> banana = db.insert('fruits', name='banana')

Or, you can add a whole bunch of fruits at the same time using the
:meth:`Connection.inserttable` method. Note that this method uses the COPY
command of PostgreSQL to insert all data in one batch operation, which is
much faster than sending many individual INSERT commands::

    >>> more_fruits = 'cherimaya durian eggfruit fig grapefruit'.split()
    >>> data = list(enumerate(more_fruits, start=3))
    >>> db.inserttable('fruits', data)

We can now query the database for all rows that have been inserted into
the *fruits* table::

    >>> print(db.query('select * from fruits'))
    id|   name
    --+----------
     1|apple
     2|banana
     3|cherimaya
     4|durian
     5|eggfruit
     6|fig
     7|grapefruit
    (7 rows)

Instead of simply printing the :class:`Query` instance that has been
returned by this query, we can also request the data as list of tuples::

    >>> q = db.query('select * from fruits')
    >>> q.getresult()
    ... [(1, 'apple'), ..., (7, 'grapefruit')]

Instead of a list of tuples, we can also request a list of dicts::

    >>> q.dictresult()
    [{'id': 1, 'name': 'apple'}, ..., {'id': 7, 'name': 'grapefruit'}]

You can also return the rows as named tuples::

    >>> rows = q.namedresult()
    >>> rows[3].name
    'durian'

In PyGreSQL 5.1 and newer, you can also use the :class:`Query` instance
directly as an iterable that yields the rows as tuples, and there are also
methods that return iterables for rows as dictionaries, named tuples or
scalar values.
Other methods like :meth:`Query.one` or :meth:`Query.onescalar` return only one row or only the first field of that row. You can get the number of rows with the :func:`len` function. Using the method :meth:`DB.get_as_dict`, you can easily import the whole table into a Python dictionary mapping the primary key *id* to the *name*:: >>> db.get_as_dict('fruits', scalar=True) {1: 'apple', 2: 'banana', 3: 'cherimaya', 4: 'durian', 5: 'eggfruit', 6: 'fig', 7: 'grapefruit'} To change a single row in the database, you can use the :meth:`DB.update` method. For instance, if you want to capitalize the name 'banana':: >>> db.update('fruits', banana, name=banana['name'].capitalize()) {'id': 2, 'name': 'Banana'} >>> print(db.query('select * from fruits where id between 1 and 3')) id| name --+--------- 1|apple 2|Banana 3|cherimaya (3 rows) Let's also capitalize the other names in the database:: >>> db.query('update fruits set name=initcap(name)') '7' The returned string `'7'` tells us the number of updated rows. It is returned as a string, to distinguish it from an OID, which would be returned as an integer if a single row had been inserted into a table with an OID column. To delete a single row from the database, use the :meth:`DB.delete` method:: >>> db.delete('fruits', banana) 1 The returned integer value `1` tells us that one row has been deleted. If we try it again, the method returns the integer value `0`. Naturally, this method can only return 0 or 1:: >>> db.delete('fruits', banana) 0 Of course, we can insert the row back again:: >>> db.insert('fruits', banana) {'id': 2, 'name': 'Banana'} If we want to change a different row, we can get its current state with:: >>> apple = db.get('fruits', 1) >>> apple {'name': 'Apple', 'id': 1} We can duplicate the row like this:: >>> db.insert('fruits', apple, id=8) {'id': 8, 'name': 'Apple'} To remove the duplicated row, we can do:: >>> db.delete('fruits', id=8) 1 Finally, to remove the table from the database and close the connection:: >>> db.query("drop table fruits") >>> db.close() For more advanced features and details, see the reference: :doc:`pg/index` First Steps with the DB-API 2.0 Interface ----------------------------------------- .. currentmodule:: pgdb As with the classic interface, the first thing you need to do is to create a database connection. To do this, use the function :func:`pgdb.connect` in the :mod:`pgdb` module, passing the connection parameters:: >>> from pgdb import connect >>> con = connect(database='testdb', host='pgserver:5432', ... user='scott', password='tiger') As in the classic interface, you can omit parameters if they are the default values used by PostgreSQL. To do anything with the connection, you need to request a cursor object from it, which can be thought of as the Python representation of a database cursor. The connection has a method that lets you get a cursor:: >>> cursor = con.cursor() The cursor has a method that lets you execute database queries:: >>> cursor.execute("create table fruits(" ...
"id serial primary key, name varchar)") You can also use this method to insert data into the table:: >>> cursor.execute("insert into fruits (name) values ('apple')") You can pass parameters in a safe way:: >>> cursor.execute("insert into fruits (name) values (%s)", ('banana',)) To insert multiple rows at once, you can use the following method:: >>> more_fruits = 'cherimaya durian eggfruit fig grapefruit'.split() >>> parameters = [(name,) for name in more_fruits] >>> cursor.executemany("insert into fruits (name) values (%s)", parameters) The cursor also has a :meth:`Cursor.copy_from` method to quickly insert large amounts of data into the database, and a :meth:`Cursor.copy_to` method to quickly dump large amounts of data from the database, using the PostgreSQL COPY command. Note however, that these methods are an extension provided by PyGreSQL, they are not part of the DB-API 2 standard. Also note that the DB API 2.0 interface does not have an autocommit as you may be used from PostgreSQL. So in order to make these inserts permanent, you need to commit them to the database:: >>> con.commit() If you end the program without calling the commit method of the connection, or if you call the rollback method of the connection, then the changes will be discarded. In a similar way, you can update or delete rows in the database, executing UPDATE or DELETE statements instead of INSERT statements. To fetch rows from the database, execute a SELECT statement first. Then you can use one of several fetch methods to retrieve the results. For instance, to request a single row:: >>> cursor.execute('select * from fruits where id=1') >>> cursor.fetchone() Row(id=1, name='apple') The result is a named tuple. This means you can access its elements either using an index number as for an ordinary tuple, or using the column name as for access to object attributes. To fetch all rows of the query, use this method instead:: >>> cursor.execute('select * from fruits') >>> cursor.fetchall() [Row(id=1, name='apple'), ..., Row(id=7, name='grapefruit')] The output is a list of named tuples. If you want to fetch only a limited number of rows from the query:: >>> cursor.execute('select * from fruits') >>> cursor.fetchmany(2) [Row(id=1, name='apple'), Row(id=2, name='banana')] Finally, to remove the table from the database and close the connection:: >>> db.execute("drop table fruits") >>> cur.close() >>> con.close() For more advanced features and details, see the reference: :doc:`pgdb/index` PyGreSQL-PyGreSQL-166b135/docs/copyright.rst000066400000000000000000000024551450706350600205030ustar00rootroot00000000000000Copyright notice ================ Written by D'Arcy J.M. Cain (darcy@druid.net) Based heavily on code written by Pascal Andre (andre@chimay.via.ecp.fr) Copyright (c) 1995, Pascal Andre Further modifications copyright (c) 1997-2008 by D'Arcy J.M. Cain (darcy@PyGreSQL.org) Further modifications copyright (c) 2009-2023 by the PyGreSQL team. Permission to use, copy, modify, and distribute this software and its documentation for any purpose, without fee, and without a written agreement is hereby granted, provided that the above copyright notice and this paragraph and the following two paragraphs appear in all copies. In this license the term "AUTHORS" refers to anyone who has contributed code to PyGreSQL. 
IN NO EVENT SHALL THE AUTHORS BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF AUTHORS HAVE BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. THE AUTHORS SPECIFICALLY DISCLAIM ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS, AND THE AUTHORS HAVE NO OBLIGATIONS TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. PyGreSQL-PyGreSQL-166b135/docs/download/000077500000000000000000000000001450706350600175425ustar00rootroot00000000000000PyGreSQL-PyGreSQL-166b135/docs/download/download.rst000066400000000000000000000024701450706350600221060ustar00rootroot00000000000000Current PyGreSQL versions ------------------------- You can download PyGreSQL from the **Python Package Index** at * https://pypi.org/project/PyGreSQL/#files **Linux RPM** packages can be found attached to the GitHub release at * https://github.com/PyGreSQL/PyGreSQL/releases/ **CentOS** packages can be found on the pkcs.org site * https://pkgs.org/search/?q=pygresql **Debian** packages can be found at * https://packages.debian.org/search?suite=all&searchon=names&keywords=pygresql **FreeBSD** packages are available in their ports collection * http://www.freebsd.org/cgi/cvsweb.cgi/ports/databases/py-PyGreSQL/ **NetBSD** packages are available in their pkgsrc collection * https://pkgsrc.se/databases/py-postgresql **openSUSE** packages are available through their build service at * https://software.opensuse.org/package/PyGreSQL?search_term=pygresql **Ubuntu** packages are available from Launchpad at * https://launchpad.net/ubuntu/+source/pygresql **Windows binaries** (as wheels) are available at * https://pypi.org/project/PyGreSQL/#files **Windows installers** (EXE and MSI) are attached to the GitHub release at * https://github.com/PyGreSQL/PyGreSQL/releases/ Older PyGreSQL versions ----------------------- You can look for older PyGreSQL versions at * https://pypi.org/project/PyGreSQL/#history PyGreSQL-PyGreSQL-166b135/docs/download/files.rst000066400000000000000000000014471450706350600214040ustar00rootroot00000000000000Distribution files ------------------ ============== = pg/ the "classic" PyGreSQL package pgdb/ a DB-SIG DB-API 2.0 compliant API wrapper for PyGreSQL ext/ the source files for the C extension module docs/ the documentation directory The documentation has been created with Sphinx. All text files are in ReST format; a HTML version of the documentation can be created with "make html". tests/ a suite of unit tests for PyGreSQL pyproject.toml contains project metadata and the build system requirements setup.py the Python setup script used for building the C extension LICENSE.text contains the license information for PyGreSQL README.rst a summary of the PyGreSQL project ============== = PyGreSQL-PyGreSQL-166b135/docs/download/index.rst000066400000000000000000000010221450706350600213760ustar00rootroot00000000000000Download information ==================== .. include:: download.rst Changes and Future Development ------------------------------ For a list of all changes in the current version |version| and in past versions, have a look at the :doc:`../contents/changelog`. The section on :doc:`../community/index` lists ideas for future developments and ways to participate. 
Installation ------------ Please read the chapter on :doc:`../contents/install` in our documentation. .. include:: files.rst .. include:: ../community/homes.rstPyGreSQL-PyGreSQL-166b135/docs/index.rst000066400000000000000000000002321450706350600175710ustar00rootroot00000000000000Welcome to PyGreSQL =================== .. toctree:: :maxdepth: 2 about copyright download/index contents/index community/index PyGreSQL-PyGreSQL-166b135/docs/make.bat000066400000000000000000000014401450706350600173370ustar00rootroot00000000000000@ECHO OFF pushd %~dp0 REM Command file for Sphinx documentation if "%SPHINXBUILD%" == "" ( set SPHINXBUILD=sphinx-build ) set SOURCEDIR=. set BUILDDIR=_build %SPHINXBUILD% >NUL 2>NUL if errorlevel 9009 ( echo. echo.The 'sphinx-build' command was not found. Make sure you have Sphinx echo.installed, then set the SPHINXBUILD environment variable to point echo.to the full path of the 'sphinx-build' executable. Alternatively you echo.may add the Sphinx directory to PATH. echo. echo.If you don't have Sphinx installed, grab it from echo.https://www.sphinx-doc.org/ exit /b 1 ) if "%1" == "" goto help %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% goto end :help %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% :end popd PyGreSQL-PyGreSQL-166b135/docs/requirements.txt000066400000000000000000000000151450706350600212130ustar00rootroot00000000000000sphinx>=7,<8 PyGreSQL-PyGreSQL-166b135/ext/000077500000000000000000000000001450706350600156035ustar00rootroot00000000000000PyGreSQL-PyGreSQL-166b135/ext/pgconn.c000066400000000000000000001562361450706350600172500ustar00rootroot00000000000000/* * PyGreSQL - a Python interface for the PostgreSQL database. * * The connection object - this file is part a of the C extension module. * * Copyright (c) 2023 by the PyGreSQL Development Team * * Please see the LICENSE.TXT file for specific restrictions. */ /* Deallocate connection object. */ static void conn_dealloc(connObject *self) { if (self->cnx) { Py_BEGIN_ALLOW_THREADS PQfinish(self->cnx); Py_END_ALLOW_THREADS } Py_XDECREF(self->cast_hook); Py_XDECREF(self->notice_receiver); PyObject_Del(self); } /* Get connection attributes. */ static PyObject * conn_getattr(connObject *self, PyObject *nameobj) { const char *name = PyUnicode_AsUTF8(nameobj); /* * Although we could check individually, there are only a few * attributes that don't require a live connection and unless someone * has an urgent need, this will have to do. */ /* first exception - close which returns a different error */ if (strcmp(name, "close") && !self->cnx) { PyErr_SetString(PyExc_TypeError, "Connection is not valid"); return NULL; } /* list PostgreSQL connection fields */ /* postmaster host */ if (!strcmp(name, "host")) { char *r = PQhost(self->cnx); if (!r || r[0] == '/') /* this can return a Unix socket path */ r = "localhost"; return PyUnicode_FromString(r); } /* postmaster port */ if (!strcmp(name, "port")) return PyLong_FromLong(atol(PQport(self->cnx))); /* selected database */ if (!strcmp(name, "db")) return PyUnicode_FromString(PQdb(self->cnx)); /* selected options */ if (!strcmp(name, "options")) return PyUnicode_FromString(PQoptions(self->cnx)); /* error (status) message */ if (!strcmp(name, "error")) return PyUnicode_FromString(PQerrorMessage(self->cnx)); /* connection status : 1 - OK, 0 - BAD */ if (!strcmp(name, "status")) return PyLong_FromLong(PQstatus(self->cnx) == CONNECTION_OK ? 
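/* map the libpq connection status to the documented values 1 (OK) and 0 (bad) */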
1 : 0); /* provided user name */ if (!strcmp(name, "user")) return PyUnicode_FromString(PQuser(self->cnx)); /* protocol version */ if (!strcmp(name, "protocol_version")) return PyLong_FromLong(PQprotocolVersion(self->cnx)); /* backend version */ if (!strcmp(name, "server_version")) return PyLong_FromLong(PQserverVersion(self->cnx)); /* descriptor number of connection socket */ if (!strcmp(name, "socket")) { return PyLong_FromLong(PQsocket(self->cnx)); } /* PID of backend process */ if (!strcmp(name, "backend_pid")) { return PyLong_FromLong(PQbackendPID(self->cnx)); } /* whether the connection uses SSL */ if (!strcmp(name, "ssl_in_use")) { if (PQsslInUse(self->cnx)) { Py_INCREF(Py_True); return Py_True; } else { Py_INCREF(Py_False); return Py_False; } } /* SSL attributes */ if (!strcmp(name, "ssl_attributes")) { return get_ssl_attributes(self->cnx); } return PyObject_GenericGetAttr((PyObject *)self, nameobj); } /* Check connection validity. */ static int _check_cnx_obj(connObject *self) { if (!self || !self->valid || !self->cnx) { set_error_msg(OperationalError, "Connection has been closed"); return 0; } return 1; } /* Create source object. */ static char conn_source__doc__[] = "source() -- create a new source object for this connection"; static PyObject * conn_source(connObject *self, PyObject *noargs) { sourceObject *source_obj; /* checks validity */ if (!_check_cnx_obj(self)) { return NULL; } /* allocates new query object */ if (!(source_obj = PyObject_New(sourceObject, &sourceType))) { return NULL; } /* initializes internal parameters */ Py_XINCREF(self); source_obj->pgcnx = self; source_obj->result = NULL; source_obj->valid = 1; source_obj->arraysize = PG_ARRAYSIZE; return (PyObject *)source_obj; } /* For a non-query result, set the appropriate error status, return the appropriate value, and free the result set. 
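For PGRES_COMMAND_OK, the number of affected rows is returned as a string, or the OID as an int when a single row was inserted; the COPY states map to None.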
*/ static PyObject * _conn_non_query_result(int status, PGresult *result, PGconn *cnx) { switch (status) { case PGRES_EMPTY_QUERY: PyErr_SetString(PyExc_ValueError, "Empty query"); break; case PGRES_BAD_RESPONSE: case PGRES_FATAL_ERROR: case PGRES_NONFATAL_ERROR: set_error(ProgrammingError, "Cannot execute query", cnx, result); break; case PGRES_COMMAND_OK: { /* INSERT, UPDATE, DELETE */ Oid oid = PQoidValue(result); if (oid == InvalidOid) { /* not a single insert */ char *ret = PQcmdTuples(result); if (ret[0]) { /* return number of rows affected */ PyObject *obj = PyUnicode_FromString(ret); PQclear(result); return obj; } PQclear(result); Py_INCREF(Py_None); return Py_None; } /* for a single insert, return the oid */ PQclear(result); return PyLong_FromLong((long)oid); } case PGRES_COPY_OUT: /* no data will be received */ case PGRES_COPY_IN: PQclear(result); Py_INCREF(Py_None); return Py_None; default: set_error_msg(InternalError, "Unknown result status"); } PQclear(result); return NULL; /* error detected on query */ } /* Base method for execution of all different kinds of queries */ static PyObject * _conn_query(connObject *self, PyObject *args, int prepared, int async) { PyObject *query_str_obj, *param_obj = NULL; PGresult *result; queryObject *query_obj; char *query; int encoding, status, nparms = 0; if (!self->cnx) { PyErr_SetString(PyExc_TypeError, "Connection is not valid"); return NULL; } /* get query args */ if (!PyArg_ParseTuple(args, "O|O", &query_str_obj, ¶m_obj)) { return NULL; } encoding = PQclientEncoding(self->cnx); if (PyBytes_Check(query_str_obj)) { query = PyBytes_AsString(query_str_obj); query_str_obj = NULL; } else if (PyUnicode_Check(query_str_obj)) { query_str_obj = get_encoded_string(query_str_obj, encoding); if (!query_str_obj) return NULL; /* pass the UnicodeEncodeError */ query = PyBytes_AsString(query_str_obj); } else { PyErr_SetString(PyExc_TypeError, "Method query() expects a string as first argument"); return NULL; } /* If param_obj is passed, ensure it's a non-empty tuple. We want to treat * an empty tuple the same as no argument since we'll get that when the * caller passes no arguments to db.query(), and historic behaviour was * to call PQexec() in that case, which can execute multiple commands. */ if (param_obj) { param_obj = PySequence_Fast( param_obj, "Method query() expects a sequence as second argument"); if (!param_obj) { Py_XDECREF(query_str_obj); return NULL; } nparms = (int)PySequence_Fast_GET_SIZE(param_obj); /* if there's a single argument and it's a list or tuple, it * contains the positional arguments. 
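* This lets callers pass the positional parameters either directly or wrapped in a single list or tuple.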
*/ if (nparms == 1) { PyObject *first_obj = PySequence_Fast_GET_ITEM(param_obj, 0); if (PyList_Check(first_obj) || PyTuple_Check(first_obj)) { Py_DECREF(param_obj); param_obj = PySequence_Fast(first_obj, NULL); nparms = (int)PySequence_Fast_GET_SIZE(param_obj); } } } /* gets result */ if (nparms) { /* prepare arguments */ PyObject **str, **s; const char **parms, **p; register int i; str = (PyObject **)PyMem_Malloc((size_t)nparms * sizeof(*str)); parms = (const char **)PyMem_Malloc((size_t)nparms * sizeof(*parms)); if (!str || !parms) { PyMem_Free((void *)parms); PyMem_Free(str); Py_XDECREF(query_str_obj); Py_XDECREF(param_obj); return PyErr_NoMemory(); } /* convert optional args to a list of strings -- this allows * the caller to pass whatever they like, and prevents us * from having to map types to OIDs */ for (i = 0, s = str, p = parms; i < nparms; ++i, ++p) { PyObject *obj = PySequence_Fast_GET_ITEM(param_obj, i); if (obj == Py_None) { *p = NULL; } else if (PyBytes_Check(obj)) { *p = PyBytes_AsString(obj); } else if (PyUnicode_Check(obj)) { PyObject *str_obj = get_encoded_string(obj, encoding); if (!str_obj) { PyMem_Free((void *)parms); while (s != str) { s--; Py_DECREF(*s); } PyMem_Free(str); Py_XDECREF(query_str_obj); Py_XDECREF(param_obj); /* pass the UnicodeEncodeError */ return NULL; } *s++ = str_obj; *p = PyBytes_AsString(str_obj); } else { PyObject *str_obj = PyObject_Str(obj); if (!str_obj) { PyMem_Free((void *)parms); while (s != str) { s--; Py_DECREF(*s); } PyMem_Free(str); Py_XDECREF(query_str_obj); Py_XDECREF(param_obj); PyErr_SetString( PyExc_TypeError, "Query parameter has no string representation"); return NULL; } *s++ = str_obj; *p = PyUnicode_AsUTF8(str_obj); } } Py_BEGIN_ALLOW_THREADS if (async) { status = PQsendQueryParams(self->cnx, query, nparms, NULL, (const char *const *)parms, NULL, NULL, 0); result = NULL; } else { result = prepared ? PQexecPrepared(self->cnx, query, nparms, parms, NULL, NULL, 0) : PQexecParams(self->cnx, query, nparms, NULL, parms, NULL, NULL, 0); status = result != NULL; } Py_END_ALLOW_THREADS PyMem_Free((void *)parms); while (s != str) { s--; Py_DECREF(*s); } PyMem_Free(str); } else { Py_BEGIN_ALLOW_THREADS if (async) { status = PQsendQuery(self->cnx, query); result = NULL; } else { result = prepared ? 
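/* run the prepared statement via PQexecPrepared, or a one-shot parameterized query via PQexecParams */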
PQexecPrepared(self->cnx, query, 0, NULL, NULL, NULL, 0) : PQexec(self->cnx, query); status = result != NULL; } Py_END_ALLOW_THREADS } /* we don't need the query and its params any more */ Py_XDECREF(query_str_obj); Py_XDECREF(param_obj); /* checks result validity */ if (!status) { PyErr_SetString(PyExc_ValueError, PQerrorMessage(self->cnx)); return NULL; } /* this may have changed the datestyle, so we reset the date format in order to force fetching it newly when next time requested */ self->date_format = date_format; /* this is normally NULL */ /* checks result status */ if (result && (status = PQresultStatus(result)) != PGRES_TUPLES_OK) return _conn_non_query_result(status, result, self->cnx); if (!(query_obj = PyObject_New(queryObject, &queryType))) return PyErr_NoMemory(); /* stores result and returns object */ Py_XINCREF(self); query_obj->pgcnx = self; query_obj->result = result; query_obj->async = async; query_obj->encoding = encoding; query_obj->current_row = 0; if (async) { query_obj->max_row = 0; query_obj->num_fields = 0; query_obj->col_types = NULL; } else { query_obj->max_row = PQntuples(result); query_obj->num_fields = PQnfields(result); query_obj->col_types = get_col_types(result, query_obj->num_fields); if (!query_obj->col_types) { Py_DECREF(query_obj); Py_DECREF(self); return NULL; } } return (PyObject *)query_obj; } /* Database query */ static char conn_query__doc__[] = "query(sql, [arg]) -- create a new query object for this connection\n\n" "You must pass the SQL (string) request and you can optionally pass\n" "a tuple with positional parameters.\n"; static PyObject * conn_query(connObject *self, PyObject *args) { return _conn_query(self, args, 0, 0); } /* Asynchronous database query */ static char conn_send_query__doc__[] = "send_query(sql, [arg]) -- create a new asynchronous query for this " "connection\n\n" "You must pass the SQL (string) request and you can optionally pass\n" "a tuple with positional parameters.\n"; static PyObject * conn_send_query(connObject *self, PyObject *args) { return _conn_query(self, args, 0, 1); } /* Execute prepared statement. */ static char conn_query_prepared__doc__[] = "query_prepared(name, [arg]) -- execute a prepared statement\n\n" "You must pass the name (string) of the prepared statement and you can\n" "optionally pass a tuple with positional parameters.\n"; static PyObject * conn_query_prepared(connObject *self, PyObject *args) { return _conn_query(self, args, 1, 0); } /* Create prepared statement. 
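No parameter types are declared (PQprepare is called with 0, NULL), so the server infers them; the statement can later be executed with query_prepared().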
*/ static char conn_prepare__doc__[] = "prepare(name, sql) -- create a prepared statement\n\n" "You must pass the name (string) of the prepared statement and the\n" "SQL (string) request for later execution.\n"; static PyObject * conn_prepare(connObject *self, PyObject *args) { char *name, *query; Py_ssize_t name_length, query_length; PGresult *result; if (!self->cnx) { PyErr_SetString(PyExc_TypeError, "Connection is not valid"); return NULL; } /* reads args */ if (!PyArg_ParseTuple(args, "s#s#", &name, &name_length, &query, &query_length)) { PyErr_SetString(PyExc_TypeError, "Method prepare() takes two string arguments"); return NULL; } /* create prepared statement */ Py_BEGIN_ALLOW_THREADS result = PQprepare(self->cnx, name, query, 0, NULL); Py_END_ALLOW_THREADS if (result && PQresultStatus(result) == PGRES_COMMAND_OK) { PQclear(result); Py_INCREF(Py_None); return Py_None; /* success */ } set_error(ProgrammingError, "Cannot create prepared statement", self->cnx, result); if (result) PQclear(result); return NULL; /* error */ } /* Describe prepared statement. */ static char conn_describe_prepared__doc__[] = "describe_prepared(name) -- describe a prepared statement\n\n" "You must pass the name (string) of the prepared statement.\n"; static PyObject * conn_describe_prepared(connObject *self, PyObject *args) { char *name; Py_ssize_t name_length; PGresult *result; if (!self->cnx) { PyErr_SetString(PyExc_TypeError, "Connection is not valid"); return NULL; } /* reads args */ if (!PyArg_ParseTuple(args, "s#", &name, &name_length)) { PyErr_SetString(PyExc_TypeError, "Method describe_prepared() takes a string argument"); return NULL; } /* describe prepared statement */ Py_BEGIN_ALLOW_THREADS result = PQdescribePrepared(self->cnx, name); Py_END_ALLOW_THREADS if (result && PQresultStatus(result) == PGRES_COMMAND_OK) { queryObject *query_obj = PyObject_New(queryObject, &queryType); if (!query_obj) return PyErr_NoMemory(); Py_XINCREF(self); query_obj->pgcnx = self; query_obj->result = result; query_obj->encoding = PQclientEncoding(self->cnx); query_obj->current_row = 0; query_obj->max_row = PQntuples(result); query_obj->num_fields = PQnfields(result); query_obj->col_types = get_col_types(result, query_obj->num_fields); return (PyObject *)query_obj; } set_error(ProgrammingError, "Cannot describe prepared statement", self->cnx, result); if (result) PQclear(result); return NULL; /* error */ } static char conn_putline__doc__[] = "putline(line) -- send a line directly to the backend"; /* Direct access function: putline. */ static PyObject * conn_putline(connObject *self, PyObject *args) { char *line; Py_ssize_t line_length; int ret; if (!self->cnx) { PyErr_SetString(PyExc_TypeError, "Connection is not valid"); return NULL; } /* read args */ if (!PyArg_ParseTuple(args, "s#", &line, &line_length)) { PyErr_SetString(PyExc_TypeError, "Method putline() takes a string argument"); return NULL; } /* send line to backend */ ret = PQputCopyData(self->cnx, line, (int)line_length); if (ret != 1) { PyErr_SetString( PyExc_IOError, ret == -1 ? PQerrorMessage(self->cnx) : "Line cannot be queued, wait for write-ready and try again"); return NULL; } Py_INCREF(Py_None); return Py_None; } /* Direct access function: getline. 
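Reads one row of COPY data with PQgetCopyData() and returns None when the server has finished sending data.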
*/ static char conn_getline__doc__[] = "getline() -- get a line directly from the backend"; static PyObject * conn_getline(connObject *self, PyObject *noargs) { char *line = NULL; PyObject *str = NULL; int ret; if (!self->cnx) { PyErr_SetString(PyExc_TypeError, "Connection is not valid"); return NULL; } /* get line synchronously */ ret = PQgetCopyData(self->cnx, &line, 0); /* check result */ if (ret <= 0) { if (line != NULL) PQfreemem(line); if (ret == -1) { PQgetResult(self->cnx); Py_INCREF(Py_None); return Py_None; } PyErr_SetString( PyExc_MemoryError, ret == -2 ? PQerrorMessage(self->cnx) : "No line available, wait for read-ready and try again"); return NULL; } if (line == NULL) { Py_INCREF(Py_None); return Py_None; } /* for backward compatibility, convert terminating newline to zero byte */ if (*line) line[strlen(line) - 1] = '\0'; str = PyUnicode_FromString(line); PQfreemem(line); return str; } /* Direct access function: end copy. */ static char conn_endcopy__doc__[] = "endcopy() -- synchronize client and server"; static PyObject * conn_endcopy(connObject *self, PyObject *noargs) { int ret; if (!self->cnx) { PyErr_SetString(PyExc_TypeError, "Connection is not valid"); return NULL; } /* end direct copy */ ret = PQputCopyEnd(self->cnx, NULL); if (ret != 1) { PyErr_SetString(PyExc_IOError, ret == -1 ? PQerrorMessage(self->cnx) : "Termination message cannot be queued," " wait for write-ready and try again"); return NULL; } Py_INCREF(Py_None); return Py_None; } /* Direct access function: set blocking status. */ static char conn_set_non_blocking__doc__[] = "set_non_blocking() -- set the non-blocking status of the connection"; static PyObject * conn_set_non_blocking(connObject *self, PyObject *args) { int non_blocking; if (!self->cnx) { PyErr_SetString(PyExc_TypeError, "Connection is not valid"); return NULL; } if (!PyArg_ParseTuple(args, "i", &non_blocking)) { PyErr_SetString( PyExc_TypeError, "set_non_blocking() expects a boolean value as argument"); return NULL; } if (PQsetnonblocking(self->cnx, non_blocking) < 0) { PyErr_SetString(PyExc_IOError, PQerrorMessage(self->cnx)); return NULL; } Py_INCREF(Py_None); return Py_None; } /* Direct access function: get blocking status. 
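This is a thin wrapper around PQisnonblocking() that returns a Python bool.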
*/ static char conn_is_non_blocking__doc__[] = "is_non_blocking() -- report the blocking status of the connection"; static PyObject * conn_is_non_blocking(connObject *self, PyObject *noargs) { int rc; if (!self->cnx) { PyErr_SetString(PyExc_TypeError, "Connection is not valid"); return NULL; } rc = PQisnonblocking(self->cnx); if (rc < 0) { PyErr_SetString(PyExc_IOError, PQerrorMessage(self->cnx)); return NULL; } return PyBool_FromLong((long)rc); } /* Insert table */ static char conn_inserttable__doc__[] = "inserttable(table, data, [columns]) -- insert iterable into table\n\n" "The fields in the iterable must be in the same order as in the table\n" "or in the list or tuple of columns if one is specified.\n"; static PyObject * conn_inserttable(connObject *self, PyObject *args) { PGresult *result; char *table, *buffer, *bufpt, *bufmax, *s, *t; int encoding, ret; size_t bufsiz; PyObject *rows, *iter_row, *item, *columns = NULL; Py_ssize_t i, j, m, n; if (!self->cnx) { PyErr_SetString(PyExc_TypeError, "Connection is not valid"); return NULL; } /* gets arguments */ if (!PyArg_ParseTuple(args, "sO|O", &table, &rows, &columns)) { PyErr_SetString( PyExc_TypeError, "Method inserttable() expects a string and a list as arguments"); return NULL; } /* checks list type */ if (!(iter_row = PyObject_GetIter(rows))) { PyErr_SetString( PyExc_TypeError, "Method inserttable() expects an iterable as second argument"); return NULL; } m = PySequence_Check(rows) ? PySequence_Size(rows) : -1; if (!m) { /* no rows specified, nothing to do */ Py_DECREF(iter_row); Py_INCREF(Py_None); return Py_None; } /* checks columns type */ if (columns) { if (!(PyTuple_Check(columns) || PyList_Check(columns))) { PyErr_SetString(PyExc_TypeError, "Method inserttable() expects a tuple or a list" " as third argument"); return NULL; } n = PySequence_Fast_GET_SIZE(columns); if (!n) { /* no columns specified, nothing to do */ Py_DECREF(iter_row); Py_INCREF(Py_None); return Py_None; } } else { n = -1; /* number of columns not yet known */ } /* allocate buffer */ if (!(buffer = PyMem_Malloc(MAX_BUFFER_SIZE))) { Py_DECREF(iter_row); return PyErr_NoMemory(); } encoding = PQclientEncoding(self->cnx); /* starts query */ bufpt = buffer; bufmax = bufpt + MAX_BUFFER_SIZE; bufpt += snprintf(bufpt, (size_t)(bufmax - bufpt), "copy "); s = table; do { t = strchr(s, '.'); if (!t) t = s + strlen(s); table = PQescapeIdentifier(self->cnx, s, (size_t)(t - s)); if (bufpt < bufmax) bufpt += snprintf(bufpt, (size_t)(bufmax - bufpt), "%s", table); PQfreemem(table); s = t; if (*s && bufpt < bufmax) *bufpt++ = *s++; } while (*s); if (columns) { /* adds a string like f" ({','.join(columns)})" */ if (bufpt < bufmax) bufpt += snprintf(bufpt, (size_t)(bufmax - bufpt), " ("); for (j = 0; j < n; ++j) { PyObject *obj = PySequence_Fast_GET_ITEM(columns, j); Py_ssize_t slen; char *col; if (PyBytes_Check(obj)) { Py_INCREF(obj); } else if (PyUnicode_Check(obj)) { obj = get_encoded_string(obj, encoding); if (!obj) { PyMem_Free(buffer); Py_DECREF(iter_row); return NULL; /* pass the UnicodeEncodeError */ } } else { PyErr_SetString( PyExc_TypeError, "The third argument must contain only strings"); PyMem_Free(buffer); Py_DECREF(iter_row); return NULL; } PyBytes_AsStringAndSize(obj, &col, &slen); col = PQescapeIdentifier(self->cnx, col, (size_t)slen); Py_DECREF(obj); if (bufpt < bufmax) bufpt += snprintf(bufpt, (size_t)(bufmax - bufpt), "%s%s", col, j == n - 1 ? 
")" : ","); PQfreemem(col); } } if (bufpt < bufmax) snprintf(bufpt, (size_t)(bufmax - bufpt), " from stdin"); if (bufpt >= bufmax) { PyMem_Free(buffer); Py_DECREF(iter_row); return PyErr_NoMemory(); } Py_BEGIN_ALLOW_THREADS result = PQexec(self->cnx, buffer); Py_END_ALLOW_THREADS if (!result || PQresultStatus(result) != PGRES_COPY_IN) { PyMem_Free(buffer); Py_DECREF(iter_row); PyErr_SetString(PyExc_ValueError, PQerrorMessage(self->cnx)); return NULL; } PQclear(result); /* feed table */ for (i = 0; m < 0 || i < m; ++i) { if (!(columns = PyIter_Next(iter_row))) break; if (!(PyTuple_Check(columns) || PyList_Check(columns))) { PQputCopyEnd(self->cnx, "Invalid arguments"); PyMem_Free(buffer); Py_DECREF(columns); Py_DECREF(columns); Py_DECREF(iter_row); PyErr_SetString( PyExc_TypeError, "The second argument must contain tuples or lists"); return NULL; } j = PySequence_Fast_GET_SIZE(columns); if (n < 0) { n = j; } else if (j != n) { PQputCopyEnd(self->cnx, "Invalid arguments"); PyMem_Free(buffer); Py_DECREF(columns); Py_DECREF(iter_row); PyErr_SetString( PyExc_TypeError, "The second arg must contain sequences of the same size"); return NULL; } /* builds insert line */ bufpt = buffer; bufsiz = MAX_BUFFER_SIZE - 1; for (j = 0; j < n; ++j) { if (j) { *bufpt++ = '\t'; --bufsiz; } item = PySequence_Fast_GET_ITEM(columns, j); /* convert item to string and append to buffer */ if (item == Py_None) { if (bufsiz > 2) { *bufpt++ = '\\'; *bufpt++ = 'N'; bufsiz -= 2; } else bufsiz = 0; } else if (PyBytes_Check(item)) { const char *t = PyBytes_AsString(item); while (*t && bufsiz) { switch (*t) { case '\\': *bufpt++ = '\\'; if (--bufsiz) *bufpt++ = '\\'; break; case '\t': *bufpt++ = '\\'; if (--bufsiz) *bufpt++ = 't'; break; case '\r': *bufpt++ = '\\'; if (--bufsiz) *bufpt++ = 'r'; break; case '\n': *bufpt++ = '\\'; if (--bufsiz) *bufpt++ = 'n'; break; default: *bufpt++ = *t; } ++t; --bufsiz; } } else if (PyUnicode_Check(item)) { PyObject *s = get_encoded_string(item, encoding); if (!s) { PQputCopyEnd(self->cnx, "Encoding error"); PyMem_Free(buffer); Py_DECREF(item); Py_DECREF(columns); Py_DECREF(iter_row); return NULL; /* pass the UnicodeEncodeError */ } else { const char *t = PyBytes_AsString(s); while (*t && bufsiz) { switch (*t) { case '\\': *bufpt++ = '\\'; if (--bufsiz) *bufpt++ = '\\'; break; case '\t': *bufpt++ = '\\'; if (--bufsiz) *bufpt++ = 't'; break; case '\r': *bufpt++ = '\\'; if (--bufsiz) *bufpt++ = 'r'; break; case '\n': *bufpt++ = '\\'; if (--bufsiz) *bufpt++ = 'n'; break; default: *bufpt++ = *t; } ++t; --bufsiz; } Py_DECREF(s); } } else if (PyLong_Check(item)) { PyObject *s = PyObject_Str(item); const char *t = PyUnicode_AsUTF8(s); while (*t && bufsiz) { *bufpt++ = *t++; --bufsiz; } Py_DECREF(s); } else { PyObject *s = PyObject_Repr(item); const char *t = PyUnicode_AsUTF8(s); while (*t && bufsiz) { switch (*t) { case '\\': *bufpt++ = '\\'; if (--bufsiz) *bufpt++ = '\\'; break; case '\t': *bufpt++ = '\\'; if (--bufsiz) *bufpt++ = 't'; break; case '\r': *bufpt++ = '\\'; if (--bufsiz) *bufpt++ = 'r'; break; case '\n': *bufpt++ = '\\'; if (--bufsiz) *bufpt++ = 'n'; break; default: *bufpt++ = *t; } ++t; --bufsiz; } Py_DECREF(s); } if (bufsiz <= 0) { PQputCopyEnd(self->cnx, "Memory error"); PyMem_Free(buffer); Py_DECREF(columns); Py_DECREF(iter_row); return PyErr_NoMemory(); } } Py_DECREF(columns); *bufpt++ = '\n'; /* sends data */ ret = PQputCopyData(self->cnx, buffer, (int)(bufpt - buffer)); if (ret != 1) { char *errormsg = ret == -1 ? 
PQerrorMessage(self->cnx) : "Data cannot be queued"; PyErr_SetString(PyExc_IOError, errormsg); PQputCopyEnd(self->cnx, errormsg); PyMem_Free(buffer); Py_DECREF(iter_row); return NULL; } } Py_DECREF(iter_row); if (PyErr_Occurred()) { PyMem_Free(buffer); return NULL; /* pass the iteration error */ } ret = PQputCopyEnd(self->cnx, NULL); if (ret != 1) { PyErr_SetString(PyExc_IOError, ret == -1 ? PQerrorMessage(self->cnx) : "Data cannot be queued"); PyMem_Free(buffer); return NULL; } PyMem_Free(buffer); Py_BEGIN_ALLOW_THREADS result = PQgetResult(self->cnx); Py_END_ALLOW_THREADS if (PQresultStatus(result) != PGRES_COMMAND_OK) { PyErr_SetString(PyExc_ValueError, PQerrorMessage(self->cnx)); PQclear(result); return NULL; } else { long ntuples = atol(PQcmdTuples(result)); PQclear(result); return PyLong_FromLong(ntuples); } } /* Get transaction state. */ static char conn_transaction__doc__[] = "transaction() -- return the current transaction status"; static PyObject * conn_transaction(connObject *self, PyObject *noargs) { if (!self->cnx) { PyErr_SetString(PyExc_TypeError, "Connection is not valid"); return NULL; } return PyLong_FromLong(PQtransactionStatus(self->cnx)); } /* Get parameter setting. */ static char conn_parameter__doc__[] = "parameter(name) -- look up a current parameter setting"; static PyObject * conn_parameter(connObject *self, PyObject *args) { const char *name; if (!self->cnx) { PyErr_SetString(PyExc_TypeError, "Connection is not valid"); return NULL; } /* get query args */ if (!PyArg_ParseTuple(args, "s", &name)) { PyErr_SetString(PyExc_TypeError, "Method parameter() takes a string as argument"); return NULL; } name = PQparameterStatus(self->cnx, name); if (name) return PyUnicode_FromString(name); /* unknown parameter, return None */ Py_INCREF(Py_None); return Py_None; } /* Get current date format. 
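The format is derived from the server's DateStyle parameter and cached on the connection; executing a query resets the cache, since the query may change the DateStyle.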
*/ static char conn_date_format__doc__[] = "date_format() -- return the current date format"; static PyObject * conn_date_format(connObject *self, PyObject *noargs) { const char *fmt; if (!self->cnx) { PyErr_SetString(PyExc_TypeError, "Connection is not valid"); return NULL; } /* check if the date format is cached in the connection */ fmt = self->date_format; if (!fmt) { fmt = date_style_to_format(PQparameterStatus(self->cnx, "DateStyle")); self->date_format = fmt; /* cache the result */ } return PyUnicode_FromString(fmt); } /* Escape literal */ static char conn_escape_literal__doc__[] = "escape_literal(str) -- escape a literal constant for use within SQL"; static PyObject * conn_escape_literal(connObject *self, PyObject *string) { PyObject *tmp_obj = NULL, /* auxiliary string object */ *to_obj; /* string object to return */ char *from, /* our string argument as encoded string */ *to; /* the result as encoded string */ Py_ssize_t from_length; /* length of string */ size_t to_length; /* length of result */ int encoding = -1; /* client encoding */ if (PyBytes_Check(string)) { PyBytes_AsStringAndSize(string, &from, &from_length); } else if (PyUnicode_Check(string)) { encoding = PQclientEncoding(self->cnx); tmp_obj = get_encoded_string(string, encoding); if (!tmp_obj) return NULL; /* pass the UnicodeEncodeError */ PyBytes_AsStringAndSize(tmp_obj, &from, &from_length); } else { PyErr_SetString( PyExc_TypeError, "Method escape_literal() expects a string as argument"); return NULL; } to = PQescapeLiteral(self->cnx, from, (size_t)from_length); to_length = strlen(to); Py_XDECREF(tmp_obj); if (encoding == -1) to_obj = PyBytes_FromStringAndSize(to, (Py_ssize_t)to_length); else to_obj = get_decoded_string(to, (Py_ssize_t)to_length, encoding); if (to) PQfreemem(to); return to_obj; } /* Escape identifier */ static char conn_escape_identifier__doc__[] = "escape_identifier(str) -- escape an identifier for use within SQL"; static PyObject * conn_escape_identifier(connObject *self, PyObject *string) { PyObject *tmp_obj = NULL, /* auxiliary string object */ *to_obj; /* string object to return */ char *from, /* our string argument as encoded string */ *to; /* the result as encoded string */ Py_ssize_t from_length; /* length of string */ size_t to_length; /* length of result */ int encoding = -1; /* client encoding */ if (PyBytes_Check(string)) { PyBytes_AsStringAndSize(string, &from, &from_length); } else if (PyUnicode_Check(string)) { encoding = PQclientEncoding(self->cnx); tmp_obj = get_encoded_string(string, encoding); if (!tmp_obj) return NULL; /* pass the UnicodeEncodeError */ PyBytes_AsStringAndSize(tmp_obj, &from, &from_length); } else { PyErr_SetString( PyExc_TypeError, "Method escape_identifier() expects a string as argument"); return NULL; } to = PQescapeIdentifier(self->cnx, from, (size_t)from_length); to_length = strlen(to); Py_XDECREF(tmp_obj); if (encoding == -1) to_obj = PyBytes_FromStringAndSize(to, (Py_ssize_t)to_length); else to_obj = get_decoded_string(to, (Py_ssize_t)to_length, encoding); if (to) PQfreemem(to); return to_obj; } /* Escape string */ static char conn_escape_string__doc__[] = "escape_string(str) -- escape a string for use within SQL"; static PyObject * conn_escape_string(connObject *self, PyObject *string) { PyObject *tmp_obj = NULL, /* auxiliary string object */ *to_obj; /* string object to return */ char *from, /* our string argument as encoded string */ *to; /* the result as encoded string */ Py_ssize_t from_length; /* length of string */ size_t to_length; /* length of 
result */ int encoding = -1; /* client encoding */ if (PyBytes_Check(string)) { PyBytes_AsStringAndSize(string, &from, &from_length); } else if (PyUnicode_Check(string)) { encoding = PQclientEncoding(self->cnx); tmp_obj = get_encoded_string(string, encoding); if (!tmp_obj) return NULL; /* pass the UnicodeEncodeError */ PyBytes_AsStringAndSize(tmp_obj, &from, &from_length); } else { PyErr_SetString(PyExc_TypeError, "Method escape_string() expects a string as argument"); return NULL; } to_length = 2 * (size_t)from_length + 1; if ((Py_ssize_t)to_length < from_length) { /* overflow */ to_length = (size_t)from_length; from_length = (from_length - 1) / 2; } to = (char *)PyMem_Malloc(to_length); to_length = PQescapeStringConn(self->cnx, to, from, (size_t)from_length, NULL); Py_XDECREF(tmp_obj); if (encoding == -1) to_obj = PyBytes_FromStringAndSize(to, (Py_ssize_t)to_length); else to_obj = get_decoded_string(to, (Py_ssize_t)to_length, encoding); PyMem_Free(to); return to_obj; } /* Escape bytea */ static char conn_escape_bytea__doc__[] = "escape_bytea(data) -- escape binary data for use within SQL as type " "bytea"; static PyObject * conn_escape_bytea(connObject *self, PyObject *data) { PyObject *tmp_obj = NULL, /* auxiliary string object */ *to_obj; /* string object to return */ char *from, /* our string argument as encoded string */ *to; /* the result as encoded string */ Py_ssize_t from_length; /* length of string */ size_t to_length; /* length of result */ int encoding = -1; /* client encoding */ if (PyBytes_Check(data)) { PyBytes_AsStringAndSize(data, &from, &from_length); } else if (PyUnicode_Check(data)) { encoding = PQclientEncoding(self->cnx); tmp_obj = get_encoded_string(data, encoding); if (!tmp_obj) return NULL; /* pass the UnicodeEncodeError */ PyBytes_AsStringAndSize(tmp_obj, &from, &from_length); } else { PyErr_SetString(PyExc_TypeError, "Method escape_bytea() expects a string as argument"); return NULL; } to = (char *)PQescapeByteaConn(self->cnx, (unsigned char *)from, (size_t)from_length, &to_length); Py_XDECREF(tmp_obj); if (encoding == -1) to_obj = PyBytes_FromStringAndSize(to, (Py_ssize_t)to_length - 1); else to_obj = get_decoded_string(to, (Py_ssize_t)to_length - 1, encoding); if (to) PQfreemem(to); return to_obj; } /* Constructor for large objects (internal use only) */ static largeObject * large_new(connObject *pgcnx, Oid oid) { largeObject *large_obj; if (!(large_obj = PyObject_New(largeObject, &largeType))) { return NULL; } Py_XINCREF(pgcnx); large_obj->pgcnx = pgcnx; large_obj->lo_fd = -1; large_obj->lo_oid = oid; return large_obj; } /* Create large object. */ static char conn_locreate__doc__[] = "locreate(mode) -- create a new large object in the database"; static PyObject * conn_locreate(connObject *self, PyObject *args) { int mode; Oid lo_oid; /* checks validity */ if (!_check_cnx_obj(self)) { return NULL; } /* gets arguments */ if (!PyArg_ParseTuple(args, "i", &mode)) { PyErr_SetString(PyExc_TypeError, "Method locreate() takes an integer argument"); return NULL; } /* creates large object */ lo_oid = lo_creat(self->cnx, mode); if (lo_oid == 0) { set_error_msg(OperationalError, "Can't create large object"); return NULL; } return (PyObject *)large_new(self, lo_oid); } /* Init from already known oid. 
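This performs no database access; it only wraps the given non-zero OID in a large object instance.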
*/ static char conn_getlo__doc__[] = "getlo(oid) -- create a large object instance for the specified oid"; static PyObject * conn_getlo(connObject *self, PyObject *args) { int oid; Oid lo_oid; /* checks validity */ if (!_check_cnx_obj(self)) { return NULL; } /* gets arguments */ if (!PyArg_ParseTuple(args, "i", &oid)) { PyErr_SetString(PyExc_TypeError, "Method getlo() takes an integer argument"); return NULL; } lo_oid = (Oid)oid; if (lo_oid == 0) { PyErr_SetString(PyExc_ValueError, "The object oid can't be null"); return NULL; } /* creates object */ return (PyObject *)large_new(self, lo_oid); } /* Import unix file. */ static char conn_loimport__doc__[] = "loimport(name) -- create a new large object from specified file"; static PyObject * conn_loimport(connObject *self, PyObject *args) { char *name; Oid lo_oid; /* checks validity */ if (!_check_cnx_obj(self)) { return NULL; } /* gets arguments */ if (!PyArg_ParseTuple(args, "s", &name)) { PyErr_SetString(PyExc_TypeError, "Method loimport() takes a string argument"); return NULL; } /* imports file and checks result */ lo_oid = lo_import(self->cnx, name); if (lo_oid == 0) { set_error_msg(OperationalError, "Can't create large object"); return NULL; } return (PyObject *)large_new(self, lo_oid); } /* Reset connection. */ static char conn_reset__doc__[] = "reset() -- reset connection with current parameters\n\n" "All derived queries and large objects derived from this connection\n" "will not be usable after this call.\n"; static PyObject * conn_reset(connObject *self, PyObject *noargs) { if (!self->cnx) { PyErr_SetString(PyExc_TypeError, "Connection is not valid"); return NULL; } /* resets the connection */ PQreset(self->cnx); Py_INCREF(Py_None); return Py_None; } /* Cancel current command. */ static char conn_cancel__doc__[] = "cancel() -- abandon processing of the current command"; static PyObject * conn_cancel(connObject *self, PyObject *noargs) { if (!self->cnx) { PyErr_SetString(PyExc_TypeError, "Connection is not valid"); return NULL; } /* request that the server abandon processing of the current command */ return PyLong_FromLong((long)PQrequestCancel(self->cnx)); } /* Get connection socket. */ static char conn_fileno__doc__[] = "fileno() -- return database connection socket file handle"; static PyObject * conn_fileno(connObject *self, PyObject *noargs) { if (!self->cnx) { PyErr_SetString(PyExc_TypeError, "Connection is not valid"); return NULL; } return PyLong_FromLong((long)PQsocket(self->cnx)); } /* Set external typecast callback function. */ static char conn_set_cast_hook__doc__[] = "set_cast_hook(func) -- set a fallback typecast function"; static PyObject * conn_set_cast_hook(connObject *self, PyObject *func) { PyObject *ret = NULL; if (func == Py_None) { Py_XDECREF(self->cast_hook); self->cast_hook = NULL; Py_INCREF(Py_None); ret = Py_None; } else if (PyCallable_Check(func)) { Py_XINCREF(func); Py_XDECREF(self->cast_hook); self->cast_hook = func; Py_INCREF(Py_None); ret = Py_None; } else { PyErr_SetString(PyExc_TypeError, "Method set_cast_hook() expects" " a callable or None as argument"); } return ret; } /* Get fallback typecast callback function. */ static char conn_get_cast_hook__doc__[] = "get_cast_hook() -- get the fallback typecast function"; static PyObject * conn_get_cast_hook(connObject *self, PyObject *noargs) { PyObject *ret = self->cast_hook; if (!ret) ret = Py_None; Py_INCREF(ret); return ret; } /* Get asynchronous connection state.
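Wraps PQconnectPoll() for connections opened in non-blocking mode and returns the resulting polling status code.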
*/ static char conn_poll__doc__[] = "poll() -- Completes an asynchronous connection"; static PyObject * conn_poll(connObject *self, PyObject *noargs) { int rc; if (!self->cnx) { PyErr_SetString(PyExc_TypeError, "Connection is not valid"); return NULL; } Py_BEGIN_ALLOW_THREADS rc = PQconnectPoll(self->cnx); Py_END_ALLOW_THREADS if (rc == PGRES_POLLING_FAILED) { set_error(InternalError, "Polling failed", self->cnx, NULL); return NULL; } return PyLong_FromLong(rc); } /* Set notice receiver callback function. */ static char conn_set_notice_receiver__doc__[] = "set_notice_receiver(func) -- set the current notice receiver"; static PyObject * conn_set_notice_receiver(connObject *self, PyObject *func) { PyObject *ret = NULL; if (func == Py_None) { Py_XDECREF(self->notice_receiver); self->notice_receiver = NULL; Py_INCREF(Py_None); ret = Py_None; } else if (PyCallable_Check(func)) { Py_XINCREF(func); Py_XDECREF(self->notice_receiver); self->notice_receiver = func; PQsetNoticeReceiver(self->cnx, notice_receiver, self); Py_INCREF(Py_None); ret = Py_None; } else { PyErr_SetString(PyExc_TypeError, "Method set_notice_receiver() expects" " a callable or None as argument"); } return ret; } /* Get notice receiver callback function. */ static char conn_get_notice_receiver__doc__[] = "get_notice_receiver() -- get the current notice receiver"; static PyObject * conn_get_notice_receiver(connObject *self, PyObject *noargs) { PyObject *ret = self->notice_receiver; if (!ret) ret = Py_None; Py_INCREF(ret); return ret; } /* Close without deleting. */ static char conn_close__doc__[] = "close() -- close connection\n\n" "All instances of the connection object and derived objects\n" "(queries and large objects) can no longer be used after this call.\n"; static PyObject * conn_close(connObject *self, PyObject *noargs) { /* connection object cannot already be closed */ if (!self->cnx) { set_error_msg(InternalError, "Connection already closed"); return NULL; } Py_BEGIN_ALLOW_THREADS PQfinish(self->cnx); Py_END_ALLOW_THREADS self->cnx = NULL; Py_INCREF(Py_None); return Py_None; } /* Get asynchronous notify. */ static char conn_get_notify__doc__[] = "getnotify() -- get database notify for this connection"; static PyObject * conn_get_notify(connObject *self, PyObject *noargs) { PGnotify *notify; if (!self->cnx) { PyErr_SetString(PyExc_TypeError, "Connection is not valid"); return NULL; } /* checks for NOTIFY messages */ PQconsumeInput(self->cnx); if (!(notify = PQnotifies(self->cnx))) { Py_INCREF(Py_None); return Py_None; } else { PyObject *notify_result, *tmp; if (!(tmp = PyUnicode_FromString(notify->relname))) { return NULL; } if (!(notify_result = PyTuple_New(3))) { return NULL; } PyTuple_SET_ITEM(notify_result, 0, tmp); if (!(tmp = PyLong_FromLong(notify->be_pid))) { Py_DECREF(notify_result); return NULL; } PyTuple_SET_ITEM(notify_result, 1, tmp); /* extra exists even in old versions that did not support it */ if (!(tmp = PyUnicode_FromString(notify->extra))) { Py_DECREF(notify_result); return NULL; } PyTuple_SET_ITEM(notify_result, 2, tmp); PQfreemem(notify); return notify_result; } } /* Get the list of connection attributes. 
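Extends the default dir() listing with the dynamic attributes handled in conn_getattr() above.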
*/ static PyObject * conn_dir(connObject *self, PyObject *noargs) { PyObject *attrs; attrs = PyObject_Dir(PyObject_Type((PyObject *)self)); PyObject_CallMethod(attrs, "extend", "[sssssssssssss]", "host", "port", "db", "options", "error", "status", "user", "protocol_version", "server_version", "socket", "backend_pid", "ssl_in_use", "ssl_attributes"); return attrs; } /* Connection object methods */ static struct PyMethodDef conn_methods[] = { {"__dir__", (PyCFunction)conn_dir, METH_NOARGS, NULL}, {"source", (PyCFunction)conn_source, METH_NOARGS, conn_source__doc__}, {"query", (PyCFunction)conn_query, METH_VARARGS, conn_query__doc__}, {"send_query", (PyCFunction)conn_send_query, METH_VARARGS, conn_send_query__doc__}, {"query_prepared", (PyCFunction)conn_query_prepared, METH_VARARGS, conn_query_prepared__doc__}, {"prepare", (PyCFunction)conn_prepare, METH_VARARGS, conn_prepare__doc__}, {"describe_prepared", (PyCFunction)conn_describe_prepared, METH_VARARGS, conn_describe_prepared__doc__}, {"poll", (PyCFunction)conn_poll, METH_NOARGS, conn_poll__doc__}, {"reset", (PyCFunction)conn_reset, METH_NOARGS, conn_reset__doc__}, {"cancel", (PyCFunction)conn_cancel, METH_NOARGS, conn_cancel__doc__}, {"close", (PyCFunction)conn_close, METH_NOARGS, conn_close__doc__}, {"fileno", (PyCFunction)conn_fileno, METH_NOARGS, conn_fileno__doc__}, {"get_cast_hook", (PyCFunction)conn_get_cast_hook, METH_NOARGS, conn_get_cast_hook__doc__}, {"set_cast_hook", (PyCFunction)conn_set_cast_hook, METH_O, conn_set_cast_hook__doc__}, {"get_notice_receiver", (PyCFunction)conn_get_notice_receiver, METH_NOARGS, conn_get_notice_receiver__doc__}, {"set_notice_receiver", (PyCFunction)conn_set_notice_receiver, METH_O, conn_set_notice_receiver__doc__}, {"getnotify", (PyCFunction)conn_get_notify, METH_NOARGS, conn_get_notify__doc__}, {"inserttable", (PyCFunction)conn_inserttable, METH_VARARGS, conn_inserttable__doc__}, {"transaction", (PyCFunction)conn_transaction, METH_NOARGS, conn_transaction__doc__}, {"parameter", (PyCFunction)conn_parameter, METH_VARARGS, conn_parameter__doc__}, {"date_format", (PyCFunction)conn_date_format, METH_NOARGS, conn_date_format__doc__}, {"escape_literal", (PyCFunction)conn_escape_literal, METH_O, conn_escape_literal__doc__}, {"escape_identifier", (PyCFunction)conn_escape_identifier, METH_O, conn_escape_identifier__doc__}, {"escape_string", (PyCFunction)conn_escape_string, METH_O, conn_escape_string__doc__}, {"escape_bytea", (PyCFunction)conn_escape_bytea, METH_O, conn_escape_bytea__doc__}, {"putline", (PyCFunction)conn_putline, METH_VARARGS, conn_putline__doc__}, {"getline", (PyCFunction)conn_getline, METH_NOARGS, conn_getline__doc__}, {"endcopy", (PyCFunction)conn_endcopy, METH_NOARGS, conn_endcopy__doc__}, {"set_non_blocking", (PyCFunction)conn_set_non_blocking, METH_VARARGS, conn_set_non_blocking__doc__}, {"is_non_blocking", (PyCFunction)conn_is_non_blocking, METH_NOARGS, conn_is_non_blocking__doc__}, {"locreate", (PyCFunction)conn_locreate, METH_VARARGS, conn_locreate__doc__}, {"getlo", (PyCFunction)conn_getlo, METH_VARARGS, conn_getlo__doc__}, {"loimport", (PyCFunction)conn_loimport, METH_VARARGS, conn_loimport__doc__}, {NULL, NULL} /* sentinel */ }; static char conn__doc__[] = "PostgreSQL connection object"; /* Connection type definition */ static PyTypeObject connType = { PyVarObject_HEAD_INIT(NULL, 0) "pg.Connection", /* tp_name */ sizeof(connObject), /* tp_basicsize */ 0, /* tp_itemsize */ (destructor)conn_dealloc, /* tp_dealloc */ 0, /* tp_print */ 0, /* tp_getattr */ 0, /* tp_setattr */ 0, /* 
tp_reserved */ 0, /* tp_repr */ 0, /* tp_as_number */ 0, /* tp_as_sequence */ 0, /* tp_as_mapping */ 0, /* tp_hash */ 0, /* tp_call */ 0, /* tp_str */ (getattrofunc)conn_getattr, /* tp_getattro */ 0, /* tp_setattro */ 0, /* tp_as_buffer */ Py_TPFLAGS_DEFAULT, /* tp_flags */ conn__doc__, /* tp_doc */ 0, /* tp_traverse */ 0, /* tp_clear */ 0, /* tp_richcompare */ 0, /* tp_weaklistoffset */ 0, /* tp_iter */ 0, /* tp_iternext */ conn_methods, /* tp_methods */ }; PyGreSQL-PyGreSQL-166b135/ext/pginternal.c000066400000000000000000001315731450706350600201240ustar00rootroot00000000000000/* * PyGreSQL - a Python interface for the PostgreSQL database. * * Internal functions - this file is part a of the C extension module. * * Copyright (c) 2023 by the PyGreSQL Development Team * * Please see the LICENSE.TXT file for specific restrictions. */ /* PyGreSQL internal types */ /* Simple types */ #define PYGRES_INT 1 #define PYGRES_LONG 2 #define PYGRES_FLOAT 3 #define PYGRES_DECIMAL 4 #define PYGRES_MONEY 5 #define PYGRES_BOOL 6 /* Text based types */ #define PYGRES_TEXT 8 #define PYGRES_BYTEA 9 #define PYGRES_JSON 10 #define PYGRES_OTHER 11 /* Array types */ #define PYGRES_ARRAY 16 /* Shared functions for encoding and decoding strings */ static PyObject * get_decoded_string(const char *str, Py_ssize_t size, int encoding) { if (encoding == pg_encoding_utf8) return PyUnicode_DecodeUTF8(str, size, "strict"); if (encoding == pg_encoding_latin1) return PyUnicode_DecodeLatin1(str, size, "strict"); if (encoding == pg_encoding_ascii) return PyUnicode_DecodeASCII(str, size, "strict"); /* encoding name should be properly translated to Python here */ return PyUnicode_Decode(str, size, pg_encoding_to_char(encoding), "strict"); } static PyObject * get_encoded_string(PyObject *unicode_obj, int encoding) { if (encoding == pg_encoding_utf8) return PyUnicode_AsUTF8String(unicode_obj); if (encoding == pg_encoding_latin1) return PyUnicode_AsLatin1String(unicode_obj); if (encoding == pg_encoding_ascii) return PyUnicode_AsASCIIString(unicode_obj); /* encoding name should be properly translated to Python here */ return PyUnicode_AsEncodedString(unicode_obj, pg_encoding_to_char(encoding), "strict"); } /* Helper functions */ /* Get PyGreSQL internal types for a PostgreSQL type. */ static int get_type(Oid pgtype) { int t; switch (pgtype) { /* simple types */ case INT2OID: case INT4OID: case CIDOID: case OIDOID: case XIDOID: t = PYGRES_INT; break; case INT8OID: t = PYGRES_LONG; break; case FLOAT4OID: case FLOAT8OID: t = PYGRES_FLOAT; break; case NUMERICOID: t = PYGRES_DECIMAL; break; case CASHOID: t = decimal_point ? PYGRES_MONEY : PYGRES_TEXT; break; case BOOLOID: t = PYGRES_BOOL; break; case BYTEAOID: t = bytea_escaped ? PYGRES_TEXT : PYGRES_BYTEA; break; case JSONOID: case JSONBOID: t = jsondecode ? PYGRES_JSON : PYGRES_TEXT; break; case BPCHAROID: case CHAROID: case TEXTOID: case VARCHAROID: case NAMEOID: case REGTYPEOID: t = PYGRES_TEXT; break; /* array types */ case INT2ARRAYOID: case INT4ARRAYOID: case CIDARRAYOID: case OIDARRAYOID: case XIDARRAYOID: t = array_as_text ? PYGRES_TEXT : (PYGRES_INT | PYGRES_ARRAY); break; case INT8ARRAYOID: t = array_as_text ? PYGRES_TEXT : (PYGRES_LONG | PYGRES_ARRAY); break; case FLOAT4ARRAYOID: case FLOAT8ARRAYOID: t = array_as_text ? PYGRES_TEXT : (PYGRES_FLOAT | PYGRES_ARRAY); break; case NUMERICARRAYOID: t = array_as_text ? PYGRES_TEXT : (PYGRES_DECIMAL | PYGRES_ARRAY); break; case MONEYARRAYOID: t = array_as_text ? PYGRES_TEXT : ((decimal_point ? 
PYGRES_MONEY : PYGRES_TEXT) | PYGRES_ARRAY); break; case BOOLARRAYOID: t = array_as_text ? PYGRES_TEXT : (PYGRES_BOOL | PYGRES_ARRAY); break; case BYTEAARRAYOID: t = array_as_text ? PYGRES_TEXT : ((bytea_escaped ? PYGRES_TEXT : PYGRES_BYTEA) | PYGRES_ARRAY); break; case JSONARRAYOID: case JSONBARRAYOID: t = array_as_text ? PYGRES_TEXT : ((jsondecode ? PYGRES_JSON : PYGRES_TEXT) | PYGRES_ARRAY); break; case BPCHARARRAYOID: case CHARARRAYOID: case TEXTARRAYOID: case VARCHARARRAYOID: case NAMEARRAYOID: case REGTYPEARRAYOID: t = array_as_text ? PYGRES_TEXT : (PYGRES_TEXT | PYGRES_ARRAY); break; default: t = PYGRES_OTHER; } return t; } /* Get PyGreSQL column types for all result columns. */ static int * get_col_types(PGresult *result, int nfields) { int *types, *t, j; if (!(types = PyMem_Malloc(sizeof(int) * (size_t)nfields))) { return (int *)PyErr_NoMemory(); } for (j = 0, t = types; j < nfields; ++j) { *t++ = get_type(PQftype(result, j)); } return types; } /* Cast a bytea encoded text based type to a Python object. This assumes the text is null-terminated character string. */ static PyObject * cast_bytea_text(char *s) { PyObject *obj; char *tmp_str; size_t str_len; /* this function should not be called when bytea_escaped is set */ tmp_str = (char *)PQunescapeBytea((unsigned char *)s, &str_len); obj = PyBytes_FromStringAndSize(tmp_str, (Py_ssize_t)str_len); if (tmp_str) { PQfreemem(tmp_str); } return obj; } /* Cast a text based type to a Python object. This needs the character string, size and encoding. */ static PyObject * cast_sized_text(char *s, Py_ssize_t size, int encoding, int type) { PyObject *obj, *tmp_obj; char *tmp_str; size_t str_len; switch (type) { /* this must be the PyGreSQL internal type */ case PYGRES_BYTEA: /* this type should not be passed when bytea_escaped is set */ /* we need to add a null byte */ tmp_str = (char *)PyMem_Malloc((size_t)size + 1); if (!tmp_str) { return PyErr_NoMemory(); } memcpy(tmp_str, s, (size_t)size); s = tmp_str; *(s + size) = '\0'; tmp_str = (char *)PQunescapeBytea((unsigned char *)s, &str_len); PyMem_Free(s); if (!tmp_str) return PyErr_NoMemory(); obj = PyBytes_FromStringAndSize(tmp_str, (Py_ssize_t)str_len); if (tmp_str) { PQfreemem(tmp_str); } break; case PYGRES_JSON: /* this type should only be passed when jsondecode is set */ obj = get_decoded_string(s, size, encoding); if (obj && jsondecode) { /* was able to decode */ tmp_obj = obj; obj = PyObject_CallFunction(jsondecode, "(O)", obj); Py_DECREF(tmp_obj); } break; default: /* PYGRES_TEXT */ obj = get_decoded_string(s, size, encoding); if (!obj) { /* cannot decode */ obj = PyBytes_FromStringAndSize(s, size); } } return obj; } /* Cast an arbitrary type to a Python object using a callback function. This needs the character string, size, encoding, the Postgres type and the external typecast function to be called. */ static PyObject * cast_other(char *s, Py_ssize_t size, int encoding, Oid pgtype, PyObject *cast_hook) { PyObject *obj; obj = cast_sized_text(s, size, encoding, PYGRES_TEXT); if (cast_hook) { PyObject *tmp_obj = obj; obj = PyObject_CallFunction(cast_hook, "(OI)", obj, pgtype); Py_DECREF(tmp_obj); } return obj; } /* Cast a simple type to a Python object. This needs a character string representation with a given size. 
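   A few illustrative conversions, matching the branches below (values
   made up): with decimal_point set to '.', the money literal "$1,234.50"
   is reduced to "1234.50" and handed to the decimal constructor, and the
   accounting-style "($123.45)" becomes "-123.45"; the boolean 't' becomes
   Python True, unless bool_as_text is set, in which case it stays "t".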
*/ static PyObject * cast_sized_simple(char *s, Py_ssize_t size, int type) { PyObject *obj, *tmp_obj; char buf[64], *t; int i, j, n; switch (type) { /* this must be the PyGreSQL internal type */ case PYGRES_INT: n = sizeof(buf) / sizeof(buf[0]) - 1; if ((int)size < n) { n = (int)size; } for (i = 0, t = buf; i < n; ++i) { *t++ = *s++; } *t = '\0'; obj = PyLong_FromString(buf, NULL, 10); break; case PYGRES_LONG: n = sizeof(buf) / sizeof(buf[0]) - 1; if ((int)size < n) { n = (int)size; } for (i = 0, t = buf; i < n; ++i) { *t++ = *s++; } *t = '\0'; obj = PyLong_FromString(buf, NULL, 10); break; case PYGRES_FLOAT: tmp_obj = PyUnicode_FromStringAndSize(s, size); obj = PyFloat_FromString(tmp_obj); Py_DECREF(tmp_obj); break; case PYGRES_MONEY: /* this type should only be passed when decimal_point is set */ n = sizeof(buf) / sizeof(buf[0]) - 1; for (i = 0, j = 0; i < size && j < n; ++i, ++s) { if (*s >= '0' && *s <= '9') { buf[j++] = *s; } else if (*s == decimal_point) { buf[j++] = '.'; } else if (*s == '(' || *s == '-') { buf[j++] = '-'; } } if (decimal) { buf[j] = '\0'; obj = PyObject_CallFunction(decimal, "(s)", buf); } else { tmp_obj = PyUnicode_FromString(buf); obj = PyFloat_FromString(tmp_obj); Py_DECREF(tmp_obj); } break; case PYGRES_DECIMAL: tmp_obj = PyUnicode_FromStringAndSize(s, size); obj = decimal ? PyObject_CallFunctionObjArgs(decimal, tmp_obj, NULL) : PyFloat_FromString(tmp_obj); Py_DECREF(tmp_obj); break; case PYGRES_BOOL: /* convert to bool only if bool_as_text is not set */ if (bool_as_text) { obj = PyUnicode_FromString(*s == 't' ? "t" : "f"); } else { obj = *s == 't' ? Py_True : Py_False; Py_INCREF(obj); } break; default: /* other types should never be passed, use cast_sized_text */ obj = PyUnicode_FromStringAndSize(s, size); } return obj; } /* Cast a simple type to a Python object. This needs a null-terminated character string representation. */ static PyObject * cast_unsized_simple(char *s, int type) { PyObject *obj, *tmp_obj; char buf[64]; int j, n; switch (type) { /* this must be the PyGreSQL internal type */ case PYGRES_INT: case PYGRES_LONG: obj = PyLong_FromString(s, NULL, 10); break; case PYGRES_FLOAT: tmp_obj = PyUnicode_FromString(s); obj = PyFloat_FromString(tmp_obj); Py_DECREF(tmp_obj); break; case PYGRES_MONEY: /* this type should only be passed when decimal_point is set */ n = sizeof(buf) / sizeof(buf[0]) - 1; for (j = 0; *s && j < n; ++s) { if (*s >= '0' && *s <= '9') { buf[j++] = *s; } else if (*s == decimal_point) { buf[j++] = '.'; } else if (*s == '(' || *s == '-') { buf[j++] = '-'; } } buf[j] = '\0'; s = buf; /* FALLTHROUGH */ /* no break here */ case PYGRES_DECIMAL: if (decimal) { obj = PyObject_CallFunction(decimal, "(s)", s); } else { tmp_obj = PyUnicode_FromString(s); obj = PyFloat_FromString(tmp_obj); Py_DECREF(tmp_obj); } break; case PYGRES_BOOL: /* convert to bool only if bool_as_text is not set */ if (bool_as_text) { obj = PyUnicode_FromString(*s == 't' ? "t" : "f"); } else { obj = *s == 't' ? Py_True : Py_False; Py_INCREF(obj); } break; default: /* other types should never be passed, use cast_sized_text */ obj = PyUnicode_FromString(s); } return obj; } /* Quick case insensitive check if given sized string is null. */ #define STR_IS_NULL(s, n) \ (n == 4 && (s[0] == 'n' || s[0] == 'N') && \ (s[1] == 'u' || s[1] == 'U') && (s[2] == 'l' || s[2] == 'L') && \ (s[3] == 'l' || s[3] == 'L')) /* Cast string s with size and encoding to a Python list, using the input and output syntax for arrays. Use internal type or cast function to cast elements. 
The parameter delim specifies the delimiter for the elements, since some types do not use the default delimiter of a comma. */ static PyObject * cast_array(char *s, Py_ssize_t size, int encoding, int type, PyObject *cast, char delim) { PyObject *result, *stack[MAX_ARRAY_DEPTH]; char *end = s + size, *t; int depth, ranges = 0, level = 0; if (type) { type &= ~PYGRES_ARRAY; /* get the base type */ if (!type) type = PYGRES_TEXT; } if (!delim) { delim = ','; } else if (delim == '{' || delim == '}' || delim == '\\') { PyErr_SetString(PyExc_ValueError, "Invalid array delimiter"); return NULL; } /* strip blanks at the beginning */ while (s != end && *s == ' ') ++s; if (*s == '[') { /* dimension ranges */ int valid; for (valid = 0; !valid;) { if (s == end || *s++ != '[') break; while (s != end && *s == ' ') ++s; if (s != end && (*s == '+' || *s == '-')) ++s; if (s == end || *s < '0' || *s > '9') break; while (s != end && *s >= '0' && *s <= '9') ++s; if (s == end || *s++ != ':') break; if (s != end && (*s == '+' || *s == '-')) ++s; if (s == end || *s < '0' || *s > '9') break; while (s != end && *s >= '0' && *s <= '9') ++s; if (s == end || *s++ != ']') break; while (s != end && *s == ' ') ++s; ++ranges; if (s != end && *s == '=') { do ++s; while (s != end && *s == ' '); valid = 1; } } if (!valid) { PyErr_SetString(PyExc_ValueError, "Invalid array dimensions"); return NULL; } } for (t = s, depth = 0; t != end && (*t == '{' || *t == ' '); ++t) { if (*t == '{') ++depth; } if (!depth) { PyErr_SetString(PyExc_ValueError, "Array must start with a left brace"); return NULL; } if (ranges && depth != ranges) { PyErr_SetString(PyExc_ValueError, "Array dimensions do not match content"); return NULL; } if (depth > MAX_ARRAY_DEPTH) { PyErr_SetString(PyExc_ValueError, "Array is too deeply nested"); return NULL; } depth--; /* next level of parsing */ result = PyList_New(0); if (!result) return NULL; do ++s; while (s != end && *s == ' '); /* everything is set up, start parsing the array */ while (s != end) { if (*s == '}') { PyObject *subresult; if (!level) break; /* top level array ended */ do ++s; while (s != end && *s == ' '); if (s == end) break; /* error */ if (*s == delim) { do ++s; while (s != end && *s == ' '); if (s == end) break; /* error */ if (*s != '{') { PyErr_SetString(PyExc_ValueError, "Subarray expected but not found"); Py_DECREF(result); return NULL; } } else if (*s != '}') break; /* error */ subresult = result; result = stack[--level]; if (PyList_Append(result, subresult)) { Py_DECREF(result); return NULL; } } else if (level == depth) { /* we expect elements at this level */ PyObject *element; char *estr; Py_ssize_t esize; int escaped = 0; if (*s == '{') { PyErr_SetString(PyExc_ValueError, "Subarray found where not expected"); Py_DECREF(result); return NULL; } if (*s == '"') { /* quoted element */ estr = ++s; while (s != end && *s != '"') { if (*s == '\\') { ++s; if (s == end) break; escaped = 1; } ++s; } esize = s - estr; do ++s; while (s != end && *s == ' '); } else { /* unquoted element */ estr = s; /* can contain blanks inside */ while (s != end && *s != '"' && *s != '{' && *s != '}' && *s != delim) { if (*s == '\\') { ++s; if (s == end) break; escaped = 1; } ++s; } t = s; while (t > estr && *(t - 1) == ' ') --t; if (!(esize = t - estr)) { s = end; break; /* error */ } if (STR_IS_NULL(estr, esize)) /* NULL gives None */ estr = NULL; } if (s == end) break; /* error */ if (estr) { if (escaped) { char *r; Py_ssize_t i; /* create unescaped string */ t = estr; estr = (char 
*)PyMem_Malloc((size_t)esize); if (!estr) { Py_DECREF(result); return PyErr_NoMemory(); } for (i = 0, r = estr; i < esize; ++i) { if (*t == '\\') ++t, ++i; *r++ = *t++; } esize = r - estr; } if (type) { /* internal casting of base type */ if (type & PYGRES_TEXT) element = cast_sized_text(estr, esize, encoding, type); else element = cast_sized_simple(estr, esize, type); } else { /* external casting of base type */ element = encoding == pg_encoding_ascii ? NULL : get_decoded_string(estr, esize, encoding); if (!element) { /* no decoding necessary or possible */ element = PyBytes_FromStringAndSize(estr, esize); } if (element && cast) { PyObject *tmp = element; element = PyObject_CallFunctionObjArgs(cast, element, NULL); Py_DECREF(tmp); } } if (escaped) PyMem_Free(estr); if (!element) { Py_DECREF(result); return NULL; } } else { Py_INCREF(Py_None); element = Py_None; } if (PyList_Append(result, element)) { Py_DECREF(element); Py_DECREF(result); return NULL; } Py_DECREF(element); if (*s == delim) { do ++s; while (s != end && *s == ' '); if (s == end) break; /* error */ } else if (*s != '}') break; /* error */ } else { /* we expect arrays at this level */ if (*s != '{') { PyErr_SetString(PyExc_ValueError, "Subarray must start with a left brace"); Py_DECREF(result); return NULL; } do ++s; while (s != end && *s == ' '); if (s == end) break; /* error */ stack[level++] = result; if (!(result = PyList_New(0))) return NULL; } } if (s == end || *s != '}') { PyErr_SetString(PyExc_ValueError, "Unexpected end of array"); Py_DECREF(result); return NULL; } do ++s; while (s != end && *s == ' '); if (s != end) { PyErr_SetString(PyExc_ValueError, "Unexpected characters after end of array"); Py_DECREF(result); return NULL; } return result; } /* Cast string s with size and encoding to a Python tuple. using the input and output syntax for composite types. Use array of internal types or cast function or sequence of cast functions to cast elements. The parameter len is the record size. The parameter delim can specify a delimiter for the elements, although composite types always use a comma as delimiter. 
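   Illustrative examples, using the module-level wrapper pg.cast_record()
   defined in pgmodule.c (behavior per the parser below; without a cast,
   elements stay text, and empty elements become None):

       pg.cast_record('(42,"two words",)')      -> ('42', 'two words', None)
       pg.cast_record('(1,2.5)', (int, float))  -> (1, 2.5)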
*/ static PyObject * cast_record(char *s, Py_ssize_t size, int encoding, int *type, PyObject *cast, Py_ssize_t len, char delim) { PyObject *result, *ret; char *end = s + size, *t; Py_ssize_t i; if (!delim) { delim = ','; } else if (delim == '(' || delim == ')' || delim == '\\') { PyErr_SetString(PyExc_ValueError, "Invalid record delimiter"); return NULL; } /* strip blanks at the beginning */ while (s != end && *s == ' ') ++s; if (s == end || *s != '(') { PyErr_SetString(PyExc_ValueError, "Record must start with a left parenthesis"); return NULL; } result = PyList_New(0); if (!result) return NULL; i = 0; /* everything is set up, start parsing the record */ while (++s != end) { PyObject *element; if (*s == ')' || *s == delim) { Py_INCREF(Py_None); element = Py_None; } else { char *estr; Py_ssize_t esize; int quoted = 0, escaped = 0; estr = s; quoted = *s == '"'; if (quoted) ++s; esize = 0; while (s != end) { if (!quoted && (*s == ')' || *s == delim)) break; if (*s == '"') { ++s; if (s == end) break; if (!(quoted && *s == '"')) { quoted = !quoted; continue; } } if (*s == '\\') { ++s; if (s == end) break; } ++s, ++esize; } if (s == end) break; /* error */ if (estr + esize != s) { char *r; escaped = 1; /* create unescaped string */ t = estr; estr = (char *)PyMem_Malloc((size_t)esize); if (!estr) { Py_DECREF(result); return PyErr_NoMemory(); } quoted = 0; r = estr; while (t != s) { if (*t == '"') { ++t; if (!(quoted && *t == '"')) { quoted = !quoted; continue; } } if (*t == '\\') ++t; *r++ = *t++; } } if (type) { /* internal casting of element type */ int etype = type[i]; if (etype & PYGRES_ARRAY) element = cast_array(estr, esize, encoding, etype, NULL, 0); else if (etype & PYGRES_TEXT) element = cast_sized_text(estr, esize, encoding, etype); else element = cast_sized_simple(estr, esize, etype); } else { /* external casting of base type */ element = encoding == pg_encoding_ascii ? NULL : get_decoded_string(estr, esize, encoding); if (!element) { /* no decoding necessary or possible */ element = PyBytes_FromStringAndSize(estr, esize); } if (element && cast) { if (len) { PyObject *ecast = PySequence_GetItem(cast, i); if (ecast) { if (ecast != Py_None) { PyObject *tmp = element; element = PyObject_CallFunctionObjArgs( ecast, element, NULL); Py_DECREF(tmp); } } else { Py_DECREF(element); element = NULL; } } else { PyObject *tmp = element; element = PyObject_CallFunctionObjArgs(cast, element, NULL); Py_DECREF(tmp); } } } if (escaped) PyMem_Free(estr); if (!element) { Py_DECREF(result); return NULL; } } if (PyList_Append(result, element)) { Py_DECREF(element); Py_DECREF(result); return NULL; } Py_DECREF(element); if (len) ++i; if (*s != delim) break; /* no next record */ if (len && i >= len) { PyErr_SetString(PyExc_ValueError, "Too many columns"); Py_DECREF(result); return NULL; } } if (s == end || *s != ')') { PyErr_SetString(PyExc_ValueError, "Unexpected end of record"); Py_DECREF(result); return NULL; } do ++s; while (s != end && *s == ' '); if (s != end) { PyErr_SetString(PyExc_ValueError, "Unexpected characters after end of record"); Py_DECREF(result); return NULL; } if (len && i < len) { PyErr_SetString(PyExc_ValueError, "Too few columns"); Py_DECREF(result); return NULL; } ret = PyList_AsTuple(result); Py_DECREF(result); return ret; } /* Cast string s with size and encoding to a Python dictionary. using the input and output syntax for hstore values. 
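   Illustrative example of the accepted syntax: the hstore literal

       '"a b"=>"1", k=>NULL'

   parses to the Python dictionary {'a b': '1', 'k': None}. Quoting is
   needed e.g. for keys or values containing spaces or commas; an
   unquoted NULL yields None, while a quoted "NULL" stays a string.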
*/ static PyObject * cast_hstore(char *s, Py_ssize_t size, int encoding) { PyObject *result; char *end = s + size; result = PyDict_New(); /* everything is set up, start parsing the record */ while (s != end) { char *key, *val; PyObject *key_obj, *val_obj; Py_ssize_t key_esc = 0, val_esc = 0, size; int quoted; while (s != end && *s == ' ') ++s; if (s == end) break; quoted = *s == '"'; if (quoted) { key = ++s; while (s != end) { if (*s == '"') break; if (*s == '\\') { if (++s == end) break; ++key_esc; } ++s; } if (s == end) { PyErr_SetString(PyExc_ValueError, "Unterminated quote"); Py_DECREF(result); return NULL; } } else { key = s; while (s != end) { if (*s == '=' || *s == ' ') break; if (*s == '\\') { if (++s == end) break; ++key_esc; } ++s; } if (s == key) { PyErr_SetString(PyExc_ValueError, "Missing key"); Py_DECREF(result); return NULL; } } size = s - key - key_esc; if (key_esc) { char *r = key, *t; key = (char *)PyMem_Malloc((size_t)size); if (!key) { Py_DECREF(result); return PyErr_NoMemory(); } t = key; while (r != s) { if (*r == '\\') { ++r; if (r == s) break; } *t++ = *r++; } } key_obj = cast_sized_text(key, size, encoding, PYGRES_TEXT); if (key_esc) PyMem_Free(key); if (!key_obj) { Py_DECREF(result); return NULL; } if (quoted) ++s; while (s != end && *s == ' ') ++s; if (s == end || *s++ != '=' || s == end || *s++ != '>') { PyErr_SetString(PyExc_ValueError, "Invalid characters after key"); Py_DECREF(key_obj); Py_DECREF(result); return NULL; } while (s != end && *s == ' ') ++s; quoted = *s == '"'; if (quoted) { val = ++s; while (s != end) { if (*s == '"') break; if (*s == '\\') { if (++s == end) break; ++val_esc; } ++s; } if (s == end) { PyErr_SetString(PyExc_ValueError, "Unterminated quote"); Py_DECREF(result); return NULL; } } else { val = s; while (s != end) { if (*s == ',' || *s == ' ') break; if (*s == '\\') { if (++s == end) break; ++val_esc; } ++s; } if (s == val) { PyErr_SetString(PyExc_ValueError, "Missing value"); Py_DECREF(key_obj); Py_DECREF(result); return NULL; } if (STR_IS_NULL(val, s - val)) val = NULL; } if (val) { size = s - val - val_esc; if (val_esc) { char *r = val, *t; val = (char *)PyMem_Malloc((size_t)size); if (!val) { Py_DECREF(key_obj); Py_DECREF(result); return PyErr_NoMemory(); } t = val; while (r != s) { if (*r == '\\') { ++r; if (r == s) break; } *t++ = *r++; } } val_obj = cast_sized_text(val, size, encoding, PYGRES_TEXT); if (val_esc) PyMem_Free(val); if (!val_obj) { Py_DECREF(key_obj); Py_DECREF(result); return NULL; } } else { Py_INCREF(Py_None); val_obj = Py_None; } if (quoted) ++s; while (s != end && *s == ' ') ++s; if (s != end) { if (*s++ != ',') { PyErr_SetString(PyExc_ValueError, "Invalid characters after val"); Py_DECREF(key_obj); Py_DECREF(val_obj); Py_DECREF(result); return NULL; } while (s != end && *s == ' ') ++s; if (s == end) { PyErr_SetString(PyExc_ValueError, "Missing entry"); Py_DECREF(key_obj); Py_DECREF(val_obj); Py_DECREF(result); return NULL; } } PyDict_SetItem(result, key_obj, val_obj); Py_DECREF(key_obj); Py_DECREF(val_obj); } return result; } /* Get appropriate error type from sqlstate. 
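   The first two characters of the five-character SQLSTATE select the
   error class. Some mappings as implemented below (illustrative):
   "0A000" (feature not supported) -> NotSupportedError, "22012"
   (division by zero) -> DataError, "23505" (unique violation) ->
   IntegrityError, "42P01" (undefined table) -> ProgrammingError,
   "40001" (serialization failure) -> OperationalError; anything
   unrecognized falls back to DatabaseError.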
*/ static PyObject * get_error_type(const char *sqlstate) { switch (sqlstate[0]) { case '0': switch (sqlstate[1]) { case 'A': return NotSupportedError; } break; case '2': switch (sqlstate[1]) { case '0': case '1': return ProgrammingError; case '2': return DataError; case '3': return IntegrityError; case '4': case '5': return InternalError; case '6': case '7': case '8': return OperationalError; case 'B': case 'D': case 'F': return InternalError; } break; case '3': switch (sqlstate[1]) { case '4': return OperationalError; case '8': case '9': case 'B': return InternalError; case 'D': case 'F': return ProgrammingError; } break; case '4': switch (sqlstate[1]) { case '0': return OperationalError; case '2': case '4': return ProgrammingError; } break; case '5': case 'H': return OperationalError; case 'F': case 'P': case 'X': return InternalError; } return DatabaseError; } /* Set database error message and sqlstate attribute. */ static void set_error_msg_and_state(PyObject *type, const char *msg, int encoding, const char *sqlstate) { PyObject *err_obj, *msg_obj, *sql_obj = NULL; if (encoding == -1) /* unknown */ msg_obj = PyUnicode_DecodeLocale(msg, NULL); else msg_obj = get_decoded_string(msg, (Py_ssize_t)strlen(msg), encoding); if (!msg_obj) /* cannot decode */ msg_obj = PyBytes_FromString(msg); if (sqlstate) { sql_obj = PyUnicode_FromStringAndSize(sqlstate, 5); } else { Py_INCREF(Py_None); sql_obj = Py_None; } err_obj = PyObject_CallFunctionObjArgs(type, msg_obj, NULL); if (err_obj) { Py_DECREF(msg_obj); PyObject_SetAttrString(err_obj, "sqlstate", sql_obj); Py_DECREF(sql_obj); PyErr_SetObject(type, err_obj); Py_DECREF(err_obj); } else { PyErr_SetString(type, msg); } } /* Set given database error message. */ static void set_error_msg(PyObject *type, const char *msg) { set_error_msg_and_state(type, msg, pg_encoding_ascii, NULL); } /* Set database error from connection and/or result. */ static void set_error(PyObject *type, const char *msg, PGconn *cnx, PGresult *result) { char *sqlstate = NULL; int encoding = pg_encoding_ascii; if (cnx) { char *err_msg = PQerrorMessage(cnx); if (err_msg) { msg = err_msg; encoding = PQclientEncoding(cnx); } } if (result) { sqlstate = PQresultErrorField(result, PG_DIAG_SQLSTATE); if (sqlstate) type = get_error_type(sqlstate); } set_error_msg_and_state(type, msg, encoding, sqlstate); } /* Get SSL attributes and values as a dictionary. */ static PyObject * get_ssl_attributes(PGconn *cnx) { PyObject *attr_dict = NULL; const char *const *s; if (!(attr_dict = PyDict_New())) { return NULL; } for (s = PQsslAttributeNames(cnx); *s; ++s) { const char *val = PQsslAttribute(cnx, *s); if (val) { PyObject *val_obj = PyUnicode_FromString(val); PyDict_SetItemString(attr_dict, *s, val_obj); Py_DECREF(val_obj); } else { PyDict_SetItemString(attr_dict, *s, Py_None); } } return attr_dict; } /* Format result (mostly useful for debugging). Note: This is similar to the Postgres function PQprint(). PQprint() is not used because handing over a stream from Python to PostgreSQL can be problematic if they use different libs for streams and because using PQprint() and tp_print is not recommended any more. 
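   For a small result set the formatted output looks roughly like this
   (illustrative):

       id|name
       --+----
        1|foo
       (1 row)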
*/
static PyObject *
format_result(const PGresult *res)
{
    const int n = PQnfields(res);

    if (n > 0) {
        char *const aligns =
            (char *)PyMem_Malloc((unsigned int)n * sizeof(char));
        size_t *const sizes =
            (size_t *)PyMem_Malloc((unsigned int)n * sizeof(size_t));

        if (aligns && sizes) {
            const int m = PQntuples(res);
            int i, j;
            size_t size;
            char *buffer;

            /* calculate sizes and alignments */
            for (j = 0; j < n; ++j) {
                const char *const s = PQfname(res, j);
                const int format = PQfformat(res, j);

                sizes[j] = s ? strlen(s) : 0;
                if (format) {
                    aligns[j] = '\0';
                    if (m && sizes[j] < 8)
                        /* "<binary>" must fit */
                        sizes[j] = 8;
                }
                else {
                    const Oid ftype = PQftype(res, j);

                    switch (ftype) {
                        case INT2OID:
                        case INT4OID:
                        case INT8OID:
                        case FLOAT4OID:
                        case FLOAT8OID:
                        case NUMERICOID:
                        case OIDOID:
                        case XIDOID:
                        case CIDOID:
                        case CASHOID:
                            aligns[j] = 'r';
                            break;
                        default:
                            aligns[j] = 'l';
                    }
                }
            }
            for (i = 0; i < m; ++i) {
                for (j = 0; j < n; ++j) {
                    if (aligns[j]) {
                        const int k = PQgetlength(res, i, j);

                        if (sizes[j] < (size_t)k)
                            /* value must fit */
                            sizes[j] = (size_t)k;
                    }
                }
            }
            /* size of one row */
            size = 0;
            for (j = 0; j < n; ++j) size += sizes[j] + 1;
            /* times number of rows incl. heading */
            size *= (size_t)m + 2;
            /* plus size of footer */
            size += 40;
            /* is the buffer size that needs to be allocated */
            buffer = (char *)PyMem_Malloc(size);
            if (buffer) {
                char *p = buffer;
                PyObject *result;

                /* create the header */
                for (j = 0; j < n; ++j) {
                    const char *const s = PQfname(res, j);
                    const size_t k = sizes[j];
                    const size_t h = (k - (size_t)strlen(s)) / 2;

                    sprintf(p, "%*s", (int)h, "");
                    sprintf(p + h, "%-*s", (int)(k - h), s);
                    p += k;
                    if (j + 1 < n)
                        *p++ = '|';
                }
                *p++ = '\n';
                for (j = 0; j < n; ++j) {
                    size_t k = sizes[j];

                    while (k--) *p++ = '-';
                    if (j + 1 < n)
                        *p++ = '+';
                }
                *p++ = '\n';
                /* create the body */
                for (i = 0; i < m; ++i) {
                    for (j = 0; j < n; ++j) {
                        const char align = aligns[j];
                        const size_t k = sizes[j];

                        if (align) {
                            sprintf(p, align == 'r' ? "%*s" : "%-*s", (int)k,
                                    PQgetvalue(res, i, j));
                        }
                        else {
                            sprintf(p, "%-*s", (int)k,
                                    PQgetisnull(res, i, j) ? "" : "<binary>");
                        }
                        p += k;
                        if (j + 1 < n)
                            *p++ = '|';
                    }
                    *p++ = '\n';
                }
                /* free memory */
                PyMem_Free(aligns);
                PyMem_Free(sizes);
                /* create the footer */
                sprintf(p, "(%d row%s)", m, m == 1 ? "" : "s");
                /* return the result */
                result = PyUnicode_FromString(buffer);
                PyMem_Free(buffer);
                return result;
            }
            else {
                PyMem_Free(aligns);
                PyMem_Free(sizes);
                return PyErr_NoMemory();
            }
        }
        else {
            PyMem_Free(aligns);
            PyMem_Free(sizes);
            return PyErr_NoMemory();
        }
    }
    else
        return PyUnicode_FromString("(nothing selected)");
}

/* Internal function converting a Postgres datestyle to date formats. */
static const char *
date_style_to_format(const char *s)
{
    static const char *formats[] = {
        "%Y-%m-%d", /* 0 = ISO */
        "%m-%d-%Y", /* 1 = Postgres, MDY */
        "%d-%m-%Y", /* 2 = Postgres, DMY */
        "%m/%d/%Y", /* 3 = SQL, MDY */
        "%d/%m/%Y", /* 4 = SQL, DMY */
        "%d.%m.%Y"  /* 5 = German */
    };

    switch (s ? *s : 'I') {
        case 'P': /* Postgres */
            s = strchr(s + 1, ',');
            if (s)
                do ++s; while (*s && *s == ' ');
            return formats[s && *s == 'D' ? 2 : 1];
        case 'S': /* SQL */
            s = strchr(s + 1, ',');
            if (s)
                do ++s; while (*s && *s == ' ');
            return formats[s && *s == 'D' ? 4 : 3];
        case 'G': /* German */
            return formats[5];
        default: /* ISO */
            return formats[0]; /* ISO is the default */
    }
}

/* Internal function converting a date format to a Postgres datestyle.
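   This is the inverse of date_style_to_format() above; only the second
   and third characters of the format string are inspected. Illustrative
   mappings: "%Y-%m-%d" -> "ISO, YMD", "%d.%m.%Y" -> "German, DMY",
   "%m/%d/%Y" -> "SQL, MDY".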
*/
static const char *
date_format_to_style(const char *s)
{
    static const char *datestyle[] = {
        "ISO, YMD",      /* 0 = %Y-%m-%d */
        "Postgres, MDY", /* 1 = %m-%d-%Y */
        "Postgres, DMY", /* 2 = %d-%m-%Y */
        "SQL, MDY",      /* 3 = %m/%d/%Y */
        "SQL, DMY",      /* 4 = %d/%m/%Y */
        "German, DMY"    /* 5 = %d.%m.%Y */
    };

    switch (s ? s[1] : 'Y') {
        case 'm':
            switch (s[2]) {
                case '/':
                    return datestyle[3]; /* SQL, MDY */
                default:
                    return datestyle[1]; /* Postgres, MDY */
            }
        case 'd':
            switch (s[2]) {
                case '/':
                    return datestyle[4]; /* SQL, DMY */
                case '.':
                    return datestyle[5]; /* German */
                default:
                    return datestyle[2]; /* Postgres, DMY */
            }
        default:
            return datestyle[0]; /* ISO */
    }
}

/* Internal wrapper for the notice receiver callback. */
static void
notice_receiver(void *arg, const PGresult *res)
{
    PyGILState_STATE gstate = PyGILState_Ensure();
    connObject *self = (connObject *)arg;
    PyObject *func = self->notice_receiver;

    if (func) {
        noticeObject *notice = PyObject_New(noticeObject, &noticeType);
        PyObject *ret;

        if (notice) {
            notice->pgcnx = arg;
            notice->res = res;
        }
        else {
            Py_INCREF(Py_None);
            notice = (noticeObject *)(void *)Py_None;
        }
        ret = PyObject_CallFunction(func, "(O)", notice);
        Py_XDECREF(ret);
    }
    PyGILState_Release(gstate);
}
PyGreSQL-PyGreSQL-166b135/ext/pglarge.c000066400000000000000000000321711450706350600173740ustar00rootroot00000000000000/*
 * PyGreSQL - a Python interface for the PostgreSQL database.
 *
 * Large object support - this file is a part of the C extension module.
 *
 * Copyright (c) 2023 by the PyGreSQL Development Team
 *
 * Please see the LICENSE.TXT file for specific restrictions.
 */

/* Deallocate large object. */
static void
large_dealloc(largeObject *self)
{
    /* Note: We do not try to close the large object here anymore,
       since the server automatically closes it at the end of the
       transaction in which it was created. So the object might already
       be closed, which will then cause error messages on the server.
       In other situations we might close the object too early here
       if the Python object falls out of scope but is still needed. */
    Py_XDECREF(self->pgcnx);
    PyObject_Del(self);
}

/* Return large object as string in human readable form. */
static PyObject *
large_str(largeObject *self)
{
    char str[80];

    sprintf(str,
            self->lo_fd >= 0 ? "Opened large object, oid %ld"
                             : "Closed large object, oid %ld",
            (long)self->lo_oid);
    return PyUnicode_FromString(str);
}

/* Check validity of large object. */
static int
_check_lo_obj(largeObject *self, int level)
{
    if (!_check_cnx_obj(self->pgcnx))
        return 0;

    if (!self->lo_oid) {
        set_error_msg(IntegrityError, "Object is not valid (null oid)");
        return 0;
    }

    if (level & CHECK_OPEN) {
        if (self->lo_fd < 0) {
            PyErr_SetString(PyExc_IOError, "Object is not opened");
            return 0;
        }
    }

    if (level & CHECK_CLOSE) {
        if (self->lo_fd >= 0) {
            PyErr_SetString(PyExc_IOError, "Object is already opened");
            return 0;
        }
    }

    return 1;
}

/* Get large object attributes.
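   The attributes exposed here are oid (the large object identifier),
   pgcnx (the owning connection) and error (the connection's current
   error message). A short Python session might look like this
   (illustrative, the oid value is made up):

       >>> lo = conn.locreate(pg.INV_READ | pg.INV_WRITE)
       >>> lo.oid
       24578
       >>> lo.pgcnx is conn
       True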
*/ static PyObject * large_getattr(largeObject *self, PyObject *nameobj) { const char *name = PyUnicode_AsUTF8(nameobj); /* list postgreSQL large object fields */ /* associated pg connection object */ if (!strcmp(name, "pgcnx")) { if (_check_lo_obj(self, 0)) { Py_INCREF(self->pgcnx); return (PyObject *)(self->pgcnx); } PyErr_Clear(); Py_INCREF(Py_None); return Py_None; } /* large object oid */ if (!strcmp(name, "oid")) { if (_check_lo_obj(self, 0)) return PyLong_FromLong((long)self->lo_oid); PyErr_Clear(); Py_INCREF(Py_None); return Py_None; } /* error (status) message */ if (!strcmp(name, "error")) return PyUnicode_FromString(PQerrorMessage(self->pgcnx->cnx)); /* seeks name in methods (fallback) */ return PyObject_GenericGetAttr((PyObject *)self, nameobj); } /* Get the list of large object attributes. */ static PyObject * large_dir(largeObject *self, PyObject *noargs) { PyObject *attrs; attrs = PyObject_Dir(PyObject_Type((PyObject *)self)); PyObject_CallMethod(attrs, "extend", "[sss]", "oid", "pgcnx", "error"); return attrs; } /* Open large object. */ static char large_open__doc__[] = "open(mode) -- open access to large object with specified mode\n\n" "The mode must be one of INV_READ, INV_WRITE (module level constants).\n"; static PyObject * large_open(largeObject *self, PyObject *args) { int mode, fd; /* gets arguments */ if (!PyArg_ParseTuple(args, "i", &mode)) { PyErr_SetString(PyExc_TypeError, "The open() method takes an integer argument"); return NULL; } /* check validity */ if (!_check_lo_obj(self, CHECK_CLOSE)) { return NULL; } /* opens large object */ if ((fd = lo_open(self->pgcnx->cnx, self->lo_oid, mode)) == -1) { PyErr_SetString(PyExc_IOError, "Can't open large object"); return NULL; } self->lo_fd = fd; /* no error : returns Py_None */ Py_INCREF(Py_None); return Py_None; } /* Close large object. */ static char large_close__doc__[] = "close() -- close access to large object data"; static PyObject * large_close(largeObject *self, PyObject *noargs) { /* checks validity */ if (!_check_lo_obj(self, CHECK_OPEN)) { return NULL; } /* closes large object */ if (lo_close(self->pgcnx->cnx, self->lo_fd)) { PyErr_SetString(PyExc_IOError, "Error while closing large object fd"); return NULL; } self->lo_fd = -1; /* no error : returns Py_None */ Py_INCREF(Py_None); return Py_None; } /* Read from large object. */ static char large_read__doc__[] = "read(size) -- read from large object to sized string\n\n" "Object must be opened in read mode before calling this method.\n"; static PyObject * large_read(largeObject *self, PyObject *args) { int size; PyObject *buffer; /* gets arguments */ if (!PyArg_ParseTuple(args, "i", &size)) { PyErr_SetString(PyExc_TypeError, "Method read() takes an integer argument"); return NULL; } if (size <= 0) { PyErr_SetString(PyExc_ValueError, "Method read() takes a positive integer as argument"); return NULL; } /* checks validity */ if (!_check_lo_obj(self, CHECK_OPEN)) { return NULL; } /* allocate buffer and runs read */ buffer = PyBytes_FromStringAndSize((char *)NULL, size); if ((size = lo_read(self->pgcnx->cnx, self->lo_fd, PyBytes_AS_STRING((PyBytesObject *)(buffer)), (size_t)size)) == -1) { PyErr_SetString(PyExc_IOError, "Error while reading"); Py_XDECREF(buffer); return NULL; } /* resize buffer and returns it */ _PyBytes_Resize(&buffer, size); return buffer; } /* Write to large object. 
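   A typical write/read round trip from Python (illustrative, assuming
   a large object lo created on an open connection as above):

       >>> lo.open(pg.INV_WRITE)
       >>> lo.write(b'some binary data')
       >>> lo.close()
       >>> lo.open(pg.INV_READ)
       >>> lo.read(16)
       b'some binary data'
       >>> lo.close()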
*/
static char large_write__doc__[] =
    "write(string) -- write sized string to large object\n\n"
    "Object must be opened in write mode before calling this method.\n";

static PyObject *
large_write(largeObject *self, PyObject *args)
{
    char *buffer;
    int size;
    Py_ssize_t bufsize;

    /* gets arguments */
    if (!PyArg_ParseTuple(args, "s#", &buffer, &bufsize)) {
        PyErr_SetString(PyExc_TypeError,
                        "Method write() expects a sized string as argument");
        return NULL;
    }

    /* checks validity */
    if (!_check_lo_obj(self, CHECK_OPEN)) {
        return NULL;
    }

    /* performs the write */
    if ((size = lo_write(self->pgcnx->cnx, self->lo_fd, buffer,
                         (size_t)bufsize)) != bufsize) {
        PyErr_SetString(PyExc_IOError, "Buffer truncated during write");
        return NULL;
    }

    /* no error : returns Py_None */
    Py_INCREF(Py_None);
    return Py_None;
}

/* Go to position in large object. */
static char large_seek__doc__[] =
    "seek(offset, whence) -- move to specified position\n\n"
    "Object must be opened before calling this method. The whence option\n"
    "can be SEEK_SET, SEEK_CUR or SEEK_END (module level constants).\n";

static PyObject *
large_seek(largeObject *self, PyObject *args)
{
    /* offset and whence are initialized to keep compiler happy */
    int ret, offset = 0, whence = 0;

    /* gets arguments */
    if (!PyArg_ParseTuple(args, "ii", &offset, &whence)) {
        PyErr_SetString(PyExc_TypeError,
                        "Method seek() expects two integer arguments");
        return NULL;
    }

    /* checks validity */
    if (!_check_lo_obj(self, CHECK_OPEN)) {
        return NULL;
    }

    /* performs the seek */
    if ((ret = lo_lseek(self->pgcnx->cnx, self->lo_fd, offset, whence)) ==
        -1) {
        PyErr_SetString(PyExc_IOError, "Error while moving cursor");
        return NULL;
    }

    /* returns position */
    return PyLong_FromLong(ret);
}

/* Get large object size. */
static char large_size__doc__[] =
    "size() -- return large object size\n\n"
    "The object must be opened before calling this method.\n";

static PyObject *
large_size(largeObject *self, PyObject *noargs)
{
    int start, end;

    /* checks validity */
    if (!_check_lo_obj(self, CHECK_OPEN)) {
        return NULL;
    }

    /* gets current position */
    if ((start = lo_tell(self->pgcnx->cnx, self->lo_fd)) == -1) {
        PyErr_SetString(PyExc_IOError,
                        "Error while getting current position");
        return NULL;
    }

    /* gets end position */
    if ((end = lo_lseek(self->pgcnx->cnx, self->lo_fd, 0, SEEK_END)) == -1) {
        PyErr_SetString(PyExc_IOError, "Error while getting end position");
        return NULL;
    }

    /* move back to start position */
    if ((start = lo_lseek(self->pgcnx->cnx, self->lo_fd, start, SEEK_SET)) ==
        -1) {
        PyErr_SetString(PyExc_IOError,
                        "Error while moving back to first position");
        return NULL;
    }

    /* returns size */
    return PyLong_FromLong(end);
}

/* Get large object cursor position. */
static char large_tell__doc__[] =
    "tell() -- give current position in large object\n\n"
    "The object must be opened before calling this method.\n";

static PyObject *
large_tell(largeObject *self, PyObject *noargs)
{
    int start;

    /* checks validity */
    if (!_check_lo_obj(self, CHECK_OPEN)) {
        return NULL;
    }

    /* gets current position */
    if ((start = lo_tell(self->pgcnx->cnx, self->lo_fd)) == -1) {
        PyErr_SetString(PyExc_IOError, "Error while getting position");
        return NULL;
    }

    /* returns position */
    return PyLong_FromLong(start);
}

/* Export large object as unix file.
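   Illustrative use, with a made-up target path; the object must not be
   open, and the file is written on the client side via lo_export():

       >>> lo.export('/tmp/lob.dump')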
*/ static char large_export__doc__[] = "export(filename) -- export large object data to specified file\n\n" "The object must be closed when calling this method.\n"; static PyObject * large_export(largeObject *self, PyObject *args) { char *name; /* checks validity */ if (!_check_lo_obj(self, CHECK_CLOSE)) { return NULL; } /* gets arguments */ if (!PyArg_ParseTuple(args, "s", &name)) { PyErr_SetString(PyExc_TypeError, "The method export() takes a filename as argument"); return NULL; } /* runs command */ if (lo_export(self->pgcnx->cnx, self->lo_oid, name) != 1) { PyErr_SetString(PyExc_IOError, "Error while exporting large object"); return NULL; } Py_INCREF(Py_None); return Py_None; } /* Delete a large object. */ static char large_unlink__doc__[] = "unlink() -- destroy large object\n\n" "The object must be closed when calling this method.\n"; static PyObject * large_unlink(largeObject *self, PyObject *noargs) { /* checks validity */ if (!_check_lo_obj(self, CHECK_CLOSE)) { return NULL; } /* deletes the object, invalidate it on success */ if (lo_unlink(self->pgcnx->cnx, self->lo_oid) != 1) { PyErr_SetString(PyExc_IOError, "Error while unlinking large object"); return NULL; } self->lo_oid = 0; Py_INCREF(Py_None); return Py_None; } /* Large object methods */ static struct PyMethodDef large_methods[] = { {"__dir__", (PyCFunction)large_dir, METH_NOARGS, NULL}, {"open", (PyCFunction)large_open, METH_VARARGS, large_open__doc__}, {"close", (PyCFunction)large_close, METH_NOARGS, large_close__doc__}, {"read", (PyCFunction)large_read, METH_VARARGS, large_read__doc__}, {"write", (PyCFunction)large_write, METH_VARARGS, large_write__doc__}, {"seek", (PyCFunction)large_seek, METH_VARARGS, large_seek__doc__}, {"size", (PyCFunction)large_size, METH_NOARGS, large_size__doc__}, {"tell", (PyCFunction)large_tell, METH_NOARGS, large_tell__doc__}, {"export", (PyCFunction)large_export, METH_VARARGS, large_export__doc__}, {"unlink", (PyCFunction)large_unlink, METH_NOARGS, large_unlink__doc__}, {NULL, NULL}}; static char large__doc__[] = "PostgreSQL large object"; /* Large object type definition */ static PyTypeObject largeType = { PyVarObject_HEAD_INIT(NULL, 0) "pg.LargeObject", /* tp_name */ sizeof(largeObject), /* tp_basicsize */ 0, /* tp_itemsize */ /* methods */ (destructor)large_dealloc, /* tp_dealloc */ 0, /* tp_print */ 0, /* tp_getattr */ 0, /* tp_setattr */ 0, /* tp_compare */ 0, /* tp_repr */ 0, /* tp_as_number */ 0, /* tp_as_sequence */ 0, /* tp_as_mapping */ 0, /* tp_hash */ 0, /* tp_call */ (reprfunc)large_str, /* tp_str */ (getattrofunc)large_getattr, /* tp_getattro */ 0, /* tp_setattro */ 0, /* tp_as_buffer */ Py_TPFLAGS_DEFAULT, /* tp_flags */ large__doc__, /* tp_doc */ 0, /* tp_traverse */ 0, /* tp_clear */ 0, /* tp_richcompare */ 0, /* tp_weaklistoffset */ 0, /* tp_iter */ 0, /* tp_iternext */ large_methods, /* tp_methods */ }; PyGreSQL-PyGreSQL-166b135/ext/pgmodule.c000066400000000000000000001211511450706350600175640ustar00rootroot00000000000000/* * PyGreSQL - a Python interface for the PostgreSQL database. * * This is the main file for the C extension module. * * Copyright (c) 2023 by the PyGreSQL Development Team * * Please see the LICENSE.TXT file for specific restrictions. 
*/

/* Note: This should be linked against the same C runtime lib as Python */

#define PY_SSIZE_T_CLEAN
#include <Python.h>

#include <libpq-fe.h>
#include <libpq/libpq-fs.h>

/* The type definitions from <server/catalog/pg_type.h> */
#include "pgtypes.h"

static PyObject *Error, *Warning, *InterfaceError, *DatabaseError,
    *InternalError, *OperationalError, *ProgrammingError, *IntegrityError,
    *DataError, *NotSupportedError, *InvalidResultError, *NoResultError,
    *MultipleResultsError, *Connection, *Query, *LargeObject;

#define _TOSTRING(x) #x
#define TOSTRING(x) _TOSTRING(x)
static const char *PyPgVersion = TOSTRING(PYGRESQL_VERSION);

#if SIZEOF_SIZE_T != SIZEOF_INT
#define Py_InitModule4 Py_InitModule4_64
#endif

/* Default values */
#define PG_ARRAYSIZE 1

/* Flags for object validity checks */
#define CHECK_OPEN 1
#define CHECK_CLOSE 2
#define CHECK_CNX 4
#define CHECK_RESULT 8
#define CHECK_DQL 16

/* Query result types */
#define RESULT_EMPTY 1
#define RESULT_DML 2
#define RESULT_DDL 3
#define RESULT_DQL 4

/* Flags for move methods */
#define QUERY_MOVEFIRST 1
#define QUERY_MOVELAST 2
#define QUERY_MOVENEXT 3
#define QUERY_MOVEPREV 4

#define MAX_BUFFER_SIZE 65536 /* maximum transaction size */
#define MAX_ARRAY_DEPTH 16    /* maximum allowed depth of an array */

/* MODULE GLOBAL VARIABLES */

static PyObject *pg_default_host;   /* default database host */
static PyObject *pg_default_base;   /* default database name */
static PyObject *pg_default_opt;    /* default connection options */
static PyObject *pg_default_port;   /* default connection port */
static PyObject *pg_default_user;   /* default username */
static PyObject *pg_default_passwd; /* default password */

static PyObject *decimal = NULL, /* decimal type */
    *dictiter = NULL,            /* function for getting dict results */
    *namediter = NULL,           /* function for getting named results */
    *namednext = NULL,           /* function for getting one named result */
    *scalariter = NULL,          /* function for getting scalar results */
    *jsondecode = NULL;          /* function for decoding json strings */
static const char *date_format = NULL; /* date format that is always assumed */
static char decimal_point = '.'; /* decimal point used in money values */
static int bool_as_text = 0;  /* whether bool shall be returned as text */
static int array_as_text = 0; /* whether arrays shall be returned as text */
static int bytea_escaped = 0; /* whether bytea shall be returned escaped */

static int pg_encoding_utf8 = 0;
static int pg_encoding_latin1 = 0;
static int pg_encoding_ascii = 0;

/*
OBJECTS
=======

Each object has a number of elements. The naming scheme will be based on
the object type. Here are the elements using example object type "foo".

- fooType: Type definition for object.
- fooObject: A structure to hold local object information.
- foo_methods: Methods declaration.
- foo_method_name: Object methods.

The objects that we need to create:

- pg: The module itself.
- conn: Connection object returned from pg.connect().
- notice: Notice object returned from pg.notice().
- large: Large object returned by pg.conn.locreate() and pg.conn.loimport().
- query: Query object returned by pg.conn.query().
- source: Source object returned by pg.conn.source().
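A typical Python session creates these objects in roughly this order
(illustrative values):

    >>> import pg
    >>> conn = pg.connect('test')         # conn object
    >>> q = conn.query("select 1 as x")   # query object
    >>> lo = conn.locreate(pg.INV_WRITE)  # large object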
*/

/* Forward declarations for types */
static PyTypeObject connType, sourceType, queryType, noticeType, largeType;

/* Forward static declarations */
static void notice_receiver(void *, const PGresult *);

/* Object declarations */

typedef struct {
    PyObject_HEAD
    int valid;                 /* validity flag */
    PGconn *cnx;               /* Postgres connection handle */
    const char *date_format;   /* date format derived from datestyle */
    PyObject *cast_hook;       /* external typecast method */
    PyObject *notice_receiver; /* current notice receiver */
} connObject;
#define is_connObject(v) (PyType(v) == &connType)

typedef struct {
    PyObject_HEAD
    int valid;         /* validity flag */
    connObject *pgcnx; /* parent connection object */
    PGresult *result;  /* result content */
    int encoding;      /* client encoding */
    int result_type;   /* result type (DDL/DML/DQL) */
    long arraysize;    /* array size for fetch method */
    int current_row;   /* currently selected row */
    int max_row;       /* number of rows in the result */
    int num_fields;    /* number of fields in each row */
} sourceObject;
#define is_sourceObject(v) (PyType(v) == &sourceType)

typedef struct {
    PyObject_HEAD
    connObject *pgcnx;   /* parent connection object */
    PGresult const *res; /* an error or warning */
} noticeObject;
#define is_noticeObject(v) (PyType(v) == &noticeType)

typedef struct {
    PyObject_HEAD
    connObject *pgcnx; /* parent connection object */
    PGresult *result;  /* result content */
    int async;         /* flag for asynchronous queries */
    int encoding;      /* client encoding */
    int current_row;   /* currently selected row */
    int max_row;       /* number of rows in the result */
    int num_fields;    /* number of fields in each row */
    int *col_types;    /* PyGreSQL column types */
} queryObject;
#define is_queryObject(v) (PyType(v) == &queryType)

typedef struct {
    PyObject_HEAD
    connObject *pgcnx; /* parent connection object */
    Oid lo_oid;        /* large object oid */
    int lo_fd;         /* large object fd */
} largeObject;
#define is_largeObject(v) (PyType(v) == &largeType)

/* Internal functions */
#include "pginternal.c"

/* Connection object */
#include "pgconn.c"

/* Query object */
#include "pgquery.c"

/* Source object */
#include "pgsource.c"

/* Notice object */
#include "pgnotice.c"

/* Large objects */
#include "pglarge.c"

/* MODULE FUNCTIONS */

/* Connect to a database. */
static char pg_connect__doc__[] =
    "connect(dbname, host, port, opt, user, passwd, nowait) -- connect to a "
    "PostgreSQL database\n\n"
    "The connection uses the specified parameters (optional, keywords "
    "aware).\n";

static PyObject *
pg_connect(PyObject *self, PyObject *args, PyObject *dict)
{
    static const char *kwlist[] = {"dbname", "host",   "port",   "opt",
                                   "user",   "passwd", "nowait", NULL};

    char *pghost, *pgopt, *pgdbname, *pguser, *pgpasswd;
    int pgport = -1, nowait = 0, nkw = 0;
    char port_buffer[20];
    const char *keywords[sizeof(kwlist) / sizeof(*kwlist) + 1],
        *values[sizeof(kwlist) / sizeof(*kwlist) + 1];
    connObject *conn_obj;

    pghost = pgopt = pgdbname = pguser = pgpasswd = NULL;

    /*
     * parses standard arguments. With the right compiler warnings, this
     * will issue a diagnostic. There is really no way around it. If I
     * don't declare kwlist as const char *kwlist[] then it complains when
     * I try to assign all those constant strings to it.
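     *
     * For reference, the Python-level call whose arguments are parsed
     * here looks like this (all parameters optional, illustrative
     * values):
     *
     *     pg.connect(dbname='test', host='localhost', port=5432,
     *                opt='-c datestyle=ISO', user='test',
     *                passwd='secret', nowait=0)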
*/ if (!PyArg_ParseTupleAndKeywords(args, dict, "|zzizzzi", (char **)kwlist, &pgdbname, &pghost, &pgport, &pgopt, &pguser, &pgpasswd, &nowait)) { return NULL; } /* handles defaults variables (for uninitialised vars) */ if ((!pghost) && (pg_default_host != Py_None)) pghost = PyBytes_AsString(pg_default_host); if ((pgport == -1) && (pg_default_port != Py_None)) pgport = (int)PyLong_AsLong(pg_default_port); if ((!pgopt) && (pg_default_opt != Py_None)) pgopt = PyBytes_AsString(pg_default_opt); if ((!pgdbname) && (pg_default_base != Py_None)) pgdbname = PyBytes_AsString(pg_default_base); if ((!pguser) && (pg_default_user != Py_None)) pguser = PyBytes_AsString(pg_default_user); if ((!pgpasswd) && (pg_default_passwd != Py_None)) pgpasswd = PyBytes_AsString(pg_default_passwd); if (!(conn_obj = PyObject_New(connObject, &connType))) { set_error_msg(InternalError, "Can't create new connection object"); return NULL; } conn_obj->valid = 1; conn_obj->cnx = NULL; conn_obj->date_format = date_format; conn_obj->cast_hook = NULL; conn_obj->notice_receiver = NULL; if (pghost) { keywords[nkw] = "host"; values[nkw++] = pghost; } if (pgopt) { keywords[nkw] = "options"; values[nkw++] = pgopt; } if (pgdbname) { keywords[nkw] = "dbname"; values[nkw++] = pgdbname; } if (pguser) { keywords[nkw] = "user"; values[nkw++] = pguser; } if (pgpasswd) { keywords[nkw] = "password"; values[nkw++] = pgpasswd; } if (pgport != -1) { memset(port_buffer, 0, sizeof(port_buffer)); sprintf(port_buffer, "%d", pgport); keywords[nkw] = "port"; values[nkw++] = port_buffer; } keywords[nkw] = values[nkw] = NULL; Py_BEGIN_ALLOW_THREADS conn_obj->cnx = nowait ? PQconnectStartParams(keywords, values, 1) : PQconnectdbParams(keywords, values, 1); Py_END_ALLOW_THREADS if (PQstatus(conn_obj->cnx) == CONNECTION_BAD) { set_error(InternalError, "Cannot connect", conn_obj->cnx, NULL); Py_XDECREF(conn_obj); return NULL; } return (PyObject *)conn_obj; } /* Get version of libpq that is being used */ static char pg_get_pqlib_version__doc__[] = "get_pqlib_version() -- get the version of libpq that is being used"; static PyObject * pg_get_pqlib_version(PyObject *self, PyObject *noargs) { return PyLong_FromLong(PQlibVersion()); } /* Escape string */ static char pg_escape_string__doc__[] = "escape_string(string) -- escape a string for use within SQL"; static PyObject * pg_escape_string(PyObject *self, PyObject *string) { PyObject *tmp_obj = NULL, /* auxiliary string object */ *to_obj; /* string object to return */ char *from, /* our string argument as encoded string */ *to; /* the result as encoded string */ Py_ssize_t from_length; /* length of string */ size_t to_length; /* length of result */ int encoding = -1; /* client encoding */ if (PyBytes_Check(string)) { PyBytes_AsStringAndSize(string, &from, &from_length); } else if (PyUnicode_Check(string)) { encoding = pg_encoding_ascii; tmp_obj = get_encoded_string(string, encoding); if (!tmp_obj) return NULL; /* pass the UnicodeEncodeError */ PyBytes_AsStringAndSize(tmp_obj, &from, &from_length); } else { PyErr_SetString(PyExc_TypeError, "Method escape_string() expects a string as argument"); return NULL; } to_length = 2 * (size_t)from_length + 1; if ((Py_ssize_t)to_length < from_length) { /* overflow */ to_length = (size_t)from_length; from_length = (from_length - 1) / 2; } to = (char *)PyMem_Malloc(to_length); to_length = (size_t)PQescapeString(to, from, (size_t)from_length); Py_XDECREF(tmp_obj); if (encoding == -1) to_obj = PyBytes_FromStringAndSize(to, (Py_ssize_t)to_length); else to_obj = 
get_decoded_string(to, (Py_ssize_t)to_length, encoding); PyMem_Free(to); return to_obj; } /* Escape bytea */ static char pg_escape_bytea__doc__[] = "escape_bytea(data) -- escape binary data for use within SQL as type " "bytea"; static PyObject * pg_escape_bytea(PyObject *self, PyObject *data) { PyObject *tmp_obj = NULL, /* auxiliary string object */ *to_obj; /* string object to return */ char *from, /* our string argument as encoded string */ *to; /* the result as encoded string */ Py_ssize_t from_length; /* length of string */ size_t to_length; /* length of result */ int encoding = -1; /* client encoding */ if (PyBytes_Check(data)) { PyBytes_AsStringAndSize(data, &from, &from_length); } else if (PyUnicode_Check(data)) { encoding = pg_encoding_ascii; tmp_obj = get_encoded_string(data, encoding); if (!tmp_obj) return NULL; /* pass the UnicodeEncodeError */ PyBytes_AsStringAndSize(tmp_obj, &from, &from_length); } else { PyErr_SetString(PyExc_TypeError, "Method escape_bytea() expects a string as argument"); return NULL; } to = (char *)PQescapeBytea((unsigned char *)from, (size_t)from_length, &to_length); Py_XDECREF(tmp_obj); if (encoding == -1) to_obj = PyBytes_FromStringAndSize(to, (Py_ssize_t)to_length - 1); else to_obj = get_decoded_string(to, (Py_ssize_t)to_length - 1, encoding); if (to) PQfreemem(to); return to_obj; } /* Unescape bytea */ static char pg_unescape_bytea__doc__[] = "unescape_bytea(string) -- unescape bytea data retrieved as text"; static PyObject * pg_unescape_bytea(PyObject *self, PyObject *data) { PyObject *tmp_obj = NULL, /* auxiliary string object */ *to_obj; /* string object to return */ char *from, /* our string argument as encoded string */ *to; /* the result as encoded string */ Py_ssize_t from_length; /* length of string */ size_t to_length; /* length of result */ if (PyBytes_Check(data)) { PyBytes_AsStringAndSize(data, &from, &from_length); } else if (PyUnicode_Check(data)) { tmp_obj = get_encoded_string(data, pg_encoding_ascii); if (!tmp_obj) return NULL; /* pass the UnicodeEncodeError */ PyBytes_AsStringAndSize(tmp_obj, &from, &from_length); } else { PyErr_SetString( PyExc_TypeError, "Method unescape_bytea() expects a string as argument"); return NULL; } to = (char *)PQunescapeBytea((unsigned char *)from, &to_length); Py_XDECREF(tmp_obj); if (!to) return PyErr_NoMemory(); to_obj = PyBytes_FromStringAndSize(to, (Py_ssize_t)to_length); PQfreemem(to); return to_obj; } /* Set fixed datestyle. */ static char pg_set_datestyle__doc__[] = "set_datestyle(style) -- set which style is assumed"; static PyObject * pg_set_datestyle(PyObject *self, PyObject *args) { const char *datestyle = NULL; /* gets arguments */ if (!PyArg_ParseTuple(args, "z", &datestyle)) { PyErr_SetString( PyExc_TypeError, "Function set_datestyle() expects a string or None as argument"); return NULL; } date_format = datestyle ? date_style_to_format(datestyle) : NULL; Py_INCREF(Py_None); return Py_None; } /* Get fixed datestyle. */ static char pg_get_datestyle__doc__[] = "get_datestyle() -- get which date style is assumed"; static PyObject * pg_get_datestyle(PyObject *self, PyObject *noargs) { if (date_format) { return PyUnicode_FromString(date_format_to_style(date_format)); } else { Py_INCREF(Py_None); return Py_None; } } /* Get decimal point. 
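   The decimal point character is only used when parsing money values
   and defaults to '.'. Illustrative Python use:

       >>> pg.get_decimal_point()
       '.'
       >>> pg.set_decimal_point(',')  # e.g. for German locale output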
*/ static char pg_get_decimal_point__doc__[] = "get_decimal_point() -- get decimal point to be used for money values"; static PyObject * pg_get_decimal_point(PyObject *self, PyObject *noargs) { PyObject *ret; char s[2]; if (decimal_point) { s[0] = decimal_point; s[1] = '\0'; ret = PyUnicode_FromString(s); } else { Py_INCREF(Py_None); ret = Py_None; } return ret; } /* Set decimal point. */ static char pg_set_decimal_point__doc__[] = "set_decimal_point(char) -- set decimal point to be used for money values"; static PyObject * pg_set_decimal_point(PyObject *self, PyObject *args) { PyObject *ret = NULL; char *s = NULL; /* gets arguments */ if (PyArg_ParseTuple(args, "z", &s)) { if (!s) s = "\0"; else if (*s && (*(s + 1) || !strchr(".,;: '*/_`|", *s))) s = NULL; } if (s) { decimal_point = *s; Py_INCREF(Py_None); ret = Py_None; } else { PyErr_SetString(PyExc_TypeError, "Function set_decimal_mark() expects" " a decimal mark character as argument"); } return ret; } /* Get decimal type. */ static char pg_get_decimal__doc__[] = "get_decimal() -- get the decimal type to be used for numeric values"; static PyObject * pg_get_decimal(PyObject *self, PyObject *noargs) { PyObject *ret; ret = decimal ? decimal : Py_None; Py_INCREF(ret); return ret; } /* Set decimal type. */ static char pg_set_decimal__doc__[] = "set_decimal(cls) -- set a decimal type to be used for numeric values"; static PyObject * pg_set_decimal(PyObject *self, PyObject *cls) { PyObject *ret = NULL; if (cls == Py_None) { Py_XDECREF(decimal); decimal = NULL; Py_INCREF(Py_None); ret = Py_None; } else if (PyCallable_Check(cls)) { Py_XINCREF(cls); Py_XDECREF(decimal); decimal = cls; Py_INCREF(Py_None); ret = Py_None; } else { PyErr_SetString(PyExc_TypeError, "Function set_decimal() expects" " a callable or None as argument"); } return ret; } /* Get usage of bool values. */ static char pg_get_bool__doc__[] = "get_bool() -- check whether boolean values are converted to bool"; static PyObject * pg_get_bool(PyObject *self, PyObject *noargs) { PyObject *ret; ret = bool_as_text ? Py_False : Py_True; Py_INCREF(ret); return ret; } /* Set usage of bool values. */ static char pg_set_bool__doc__[] = "set_bool(on) -- set whether boolean values should be converted to bool"; static PyObject * pg_set_bool(PyObject *self, PyObject *args) { PyObject *ret = NULL; int i; /* gets arguments */ if (PyArg_ParseTuple(args, "i", &i)) { bool_as_text = i ? 0 : 1; Py_INCREF(Py_None); ret = Py_None; } else { PyErr_SetString( PyExc_TypeError, "Function set_bool() expects a boolean value as argument"); } return ret; } /* Get conversion of arrays to lists. */ static char pg_get_array__doc__[] = "get_array() -- check whether arrays are converted as lists"; static PyObject * pg_get_array(PyObject *self, PyObject *noargs) { PyObject *ret; ret = array_as_text ? Py_False : Py_True; Py_INCREF(ret); return ret; } /* Set conversion of arrays to lists. */ static char pg_set_array__doc__[] = "set_array(on) -- set whether arrays should be converted to lists"; static PyObject * pg_set_array(PyObject *self, PyObject *args) { PyObject *ret = NULL; int i; /* gets arguments */ if (PyArg_ParseTuple(args, "i", &i)) { array_as_text = i ? 0 : 1; Py_INCREF(Py_None); ret = Py_None; } else { PyErr_SetString( PyExc_TypeError, "Function set_array() expects a boolean value as argument"); } return ret; } /* Check whether bytea values are unescaped. 
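   By default, bytea columns are unescaped and returned as bytes; when
   escaping is switched on, the raw escaped text is returned instead.
   Illustrative Python use:

       >>> pg.get_bytea_escaped()
       False
       >>> pg.set_bytea_escaped(True)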
*/ static char pg_get_bytea_escaped__doc__[] = "get_bytea_escaped() -- check whether bytea will be returned escaped"; static PyObject * pg_get_bytea_escaped(PyObject *self, PyObject *noargs) { PyObject *ret; ret = bytea_escaped ? Py_True : Py_False; Py_INCREF(ret); return ret; } /* Set usage of bool values. */ static char pg_set_bytea_escaped__doc__[] = "set_bytea_escaped(on) -- set whether bytea will be returned escaped"; static PyObject * pg_set_bytea_escaped(PyObject *self, PyObject *args) { PyObject *ret = NULL; int i; /* gets arguments */ if (PyArg_ParseTuple(args, "i", &i)) { bytea_escaped = i ? 1 : 0; Py_INCREF(Py_None); ret = Py_None; } else { PyErr_SetString(PyExc_TypeError, "Function set_bytea_escaped() expects" " a boolean value as argument"); } return ret; } /* set query helper functions (not part of public API) */ static char pg_set_query_helpers__doc__[] = "set_query_helpers(*helpers) -- set internal query helper functions"; static PyObject * pg_set_query_helpers(PyObject *self, PyObject *args) { /* gets arguments */ if (!PyArg_ParseTuple(args, "O!O!O!O!", &PyFunction_Type, &dictiter, &PyFunction_Type, &namediter, &PyFunction_Type, &namednext, &PyFunction_Type, &scalariter)) { return NULL; } Py_INCREF(Py_None); return Py_None; } /* Get json decode function. */ static char pg_get_jsondecode__doc__[] = "get_jsondecode() -- get the function used for decoding json results"; static PyObject * pg_get_jsondecode(PyObject *self, PyObject *noargs) { PyObject *ret; ret = jsondecode; if (!ret) ret = Py_None; Py_INCREF(ret); return ret; } /* Set json decode function. */ static char pg_set_jsondecode__doc__[] = "set_jsondecode(func) -- set a function to be used for decoding json " "results"; static PyObject * pg_set_jsondecode(PyObject *self, PyObject *func) { PyObject *ret = NULL; if (func == Py_None) { Py_XDECREF(jsondecode); jsondecode = NULL; Py_INCREF(Py_None); ret = Py_None; } else if (PyCallable_Check(func)) { Py_XINCREF(func); Py_XDECREF(jsondecode); jsondecode = func; Py_INCREF(Py_None); ret = Py_None; } else { PyErr_SetString(PyExc_TypeError, "Function jsondecode() expects" " a callable or None as argument"); } return ret; } /* Get default host. */ static char pg_get_defhost__doc__[] = "get_defhost() -- return default database host"; static PyObject * pg_get_defhost(PyObject *self, PyObject *noargs) { Py_XINCREF(pg_default_host); return pg_default_host; } /* Set default host. */ static char pg_set_defhost__doc__[] = "set_defhost(string) -- set default database host and return previous " "value"; static PyObject * pg_set_defhost(PyObject *self, PyObject *args) { char *tmp = NULL; PyObject *old; /* gets arguments */ if (!PyArg_ParseTuple(args, "z", &tmp)) { PyErr_SetString( PyExc_TypeError, "Function set_defhost() expects a string or None as argument"); return NULL; } /* adjusts value */ old = pg_default_host; if (tmp) { pg_default_host = PyUnicode_FromString(tmp); } else { Py_INCREF(Py_None); pg_default_host = Py_None; } return old; } /* Get default database. */ static char pg_get_defbase__doc__[] = "get_defbase() -- return default database name"; static PyObject * pg_get_defbase(PyObject *self, PyObject *noargs) { Py_XINCREF(pg_default_base); return pg_default_base; } /* Set default database. 
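   Like the other set_def*() functions, this returns the previous
   setting so that it can be restored later. Illustrative Python use:

       >>> old = pg.set_defbase('test')
       >>> pg.get_defbase()
       'test'
       >>> pg.set_defbase(old)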
/* Set default database. */ static char pg_set_defbase__doc__[] = "set_defbase(string) -- set default database name and return previous " "value"; static PyObject * pg_set_defbase(PyObject *self, PyObject *args) { char *tmp = NULL; PyObject *old; /* gets arguments */ if (!PyArg_ParseTuple(args, "z", &tmp)) { PyErr_SetString( PyExc_TypeError, "Function set_defbase() expects a string or None as argument"); return NULL; } /* adjusts value */ old = pg_default_base; if (tmp) { pg_default_base = PyUnicode_FromString(tmp); } else { Py_INCREF(Py_None); pg_default_base = Py_None; } return old; } /* Get default options. */ static char pg_get_defopt__doc__[] = "get_defopt() -- return default database options"; static PyObject * pg_get_defopt(PyObject *self, PyObject *noargs) { Py_XINCREF(pg_default_opt); return pg_default_opt; } /* Set default options. */ static char pg_set_defopt__doc__[] = "set_defopt(string) -- set default options and return previous value"; static PyObject * pg_set_defopt(PyObject *self, PyObject *args) { char *tmp = NULL; PyObject *old; /* gets arguments */ if (!PyArg_ParseTuple(args, "z", &tmp)) { PyErr_SetString( PyExc_TypeError, "Function set_defopt() expects a string or None as argument"); return NULL; } /* adjusts value */ old = pg_default_opt; if (tmp) { pg_default_opt = PyUnicode_FromString(tmp); } else { Py_INCREF(Py_None); pg_default_opt = Py_None; } return old; } /* Get default username. */ static char pg_get_defuser__doc__[] = "get_defuser() -- return default database username"; static PyObject * pg_get_defuser(PyObject *self, PyObject *noargs) { Py_XINCREF(pg_default_user); return pg_default_user; } /* Set default username. */ static char pg_set_defuser__doc__[] = "set_defuser(name) -- set default username and return previous value"; static PyObject * pg_set_defuser(PyObject *self, PyObject *args) { char *tmp = NULL; PyObject *old; /* gets arguments */ if (!PyArg_ParseTuple(args, "z", &tmp)) { PyErr_SetString( PyExc_TypeError, "Function set_defuser() expects a string or None as argument"); return NULL; } /* adjusts value */ old = pg_default_user; if (tmp) { pg_default_user = PyUnicode_FromString(tmp); } else { Py_INCREF(Py_None); pg_default_user = Py_None; } return old; } /* Set default password. */ static char pg_set_defpasswd__doc__[] = "set_defpasswd(password) -- set default database password"; static PyObject * pg_set_defpasswd(PyObject *self, PyObject *args) { char *tmp = NULL; /* gets arguments */ if (!PyArg_ParseTuple(args, "z", &tmp)) { PyErr_SetString( PyExc_TypeError, "Function set_defpasswd() expects a string or None as argument"); return NULL; } /* release the previous value to avoid leaking it */ Py_XDECREF(pg_default_passwd); if (tmp) { pg_default_passwd = PyUnicode_FromString(tmp); } else { Py_INCREF(Py_None); pg_default_passwd = Py_None; } Py_INCREF(Py_None); return Py_None; } /* Get default port. */ static char pg_get_defport__doc__[] = "get_defport() -- return default database port"; static PyObject * pg_get_defport(PyObject *self, PyObject *noargs) { Py_XINCREF(pg_default_port); return pg_default_port; }
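/* Example (editor's sketch): the default connection parameters set above are
   picked up by pg.connect() when the corresponding arguments are omitted;
   the host/database/user values here are made up for illustration.

       >>> import pg
       >>> pg.set_defhost('localhost')  # each setter returns the old value
       >>> pg.set_defport(5432)
       >>> pg.set_defbase('test')
       >>> pg.set_defuser('test')
       >>> pg.set_defpasswd('secret')   # returns None, unlike the others
       >>> con = pg.connect()           # now uses the defaults set above
*/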
/* Set default port. */ static char pg_set_defport__doc__[] = "set_defport(port) -- set default port and return previous value"; static PyObject * pg_set_defport(PyObject *self, PyObject *args) { long int port = -2; PyObject *old; /* gets arguments */ if ((!PyArg_ParseTuple(args, "l", &port)) || (port < -1)) { PyErr_SetString(PyExc_TypeError, "Function set_defport() expects" " a positive integer or -1 as argument"); return NULL; } /* adjusts value */ old = pg_default_port; if (port != -1) { pg_default_port = PyLong_FromLong(port); } else { Py_INCREF(Py_None); pg_default_port = Py_None; } return old; } /* Cast a string with a text representation of an array to a list. */ static char pg_cast_array__doc__[] = "cast_array(string, cast=None, delim=',') -- cast a string as an array"; PyObject * pg_cast_array(PyObject *self, PyObject *args, PyObject *dict) { static const char *kwlist[] = {"string", "cast", "delim", NULL}; PyObject *string_obj, *cast_obj = NULL, *ret; char *string, delim = ','; Py_ssize_t size; int encoding; if (!PyArg_ParseTupleAndKeywords(args, dict, "O|Oc", (char **)kwlist, &string_obj, &cast_obj, &delim)) { return NULL; } if (PyBytes_Check(string_obj)) { PyBytes_AsStringAndSize(string_obj, &string, &size); string_obj = NULL; encoding = pg_encoding_ascii; } else if (PyUnicode_Check(string_obj)) { string_obj = PyUnicode_AsUTF8String(string_obj); if (!string_obj) return NULL; /* pass the UnicodeEncodeError */ PyBytes_AsStringAndSize(string_obj, &string, &size); encoding = pg_encoding_utf8; } else { PyErr_SetString( PyExc_TypeError, "Function cast_array() expects a string as first argument"); return NULL; } if (cast_obj == Py_None) { cast_obj = NULL; } else if (cast_obj && !PyCallable_Check(cast_obj)) { PyErr_SetString( PyExc_TypeError, "Function cast_array() expects a callable as second argument"); return NULL; } ret = cast_array(string, size, encoding, 0, cast_obj, delim); Py_XDECREF(string_obj); return ret; } /* Cast a string with a text representation of a record to a tuple. */ static char pg_cast_record__doc__[] = "cast_record(string, cast=None, delim=',') -- cast a string as a record"; PyObject * pg_cast_record(PyObject *self, PyObject *args, PyObject *dict) { static const char *kwlist[] = {"string", "cast", "delim", NULL}; PyObject *string_obj, *cast_obj = NULL, *ret; char *string, delim = ','; Py_ssize_t size, len; int encoding; if (!PyArg_ParseTupleAndKeywords(args, dict, "O|Oc", (char **)kwlist, &string_obj, &cast_obj, &delim)) { return NULL; } if (PyBytes_Check(string_obj)) { PyBytes_AsStringAndSize(string_obj, &string, &size); string_obj = NULL; encoding = pg_encoding_ascii; } else if (PyUnicode_Check(string_obj)) { string_obj = PyUnicode_AsUTF8String(string_obj); if (!string_obj) return NULL; /* pass the UnicodeEncodeError */ PyBytes_AsStringAndSize(string_obj, &string, &size); encoding = pg_encoding_utf8; } else { PyErr_SetString( PyExc_TypeError, "Function cast_record() expects a string as first argument"); return NULL; } if (!cast_obj || PyCallable_Check(cast_obj)) { len = 0; } else if (cast_obj == Py_None) { cast_obj = NULL; len = 0; } else if (PyTuple_Check(cast_obj) || PyList_Check(cast_obj)) { len = PySequence_Size(cast_obj); if (!len) { cast_obj = NULL; } } else { PyErr_SetString(PyExc_TypeError, "Function cast_record() expects a callable" " or tuple or list of callables as second argument"); return NULL; } ret = cast_record(string, size, encoding, 0, cast_obj, len, delim); Py_XDECREF(string_obj); return ret; }
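/* Example (editor's sketch): direct use of the two cast functions above.
   cast_array() parses a PostgreSQL array literal into a list, applying the
   optional cast to each element; cast_record() parses a row literal into a
   tuple, accepting a single cast or a sequence of casts.

       >>> from pg import cast_array, cast_record
       >>> cast_array('{1,2,3}', int)
       [1, 2, 3]
       >>> cast_record('(1,foo)', (int, str))
       (1, 'foo')
*/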
/* Cast a string with a text representation of an hstore to a dict. */ static char pg_cast_hstore__doc__[] = "cast_hstore(string) -- cast a string as an hstore"; PyObject * pg_cast_hstore(PyObject *self, PyObject *string) { PyObject *tmp_obj = NULL, *ret; char *s; Py_ssize_t size; int encoding; if (PyBytes_Check(string)) { PyBytes_AsStringAndSize(string, &s, &size); encoding = pg_encoding_ascii; } else if (PyUnicode_Check(string)) { tmp_obj = PyUnicode_AsUTF8String(string); if (!tmp_obj) return NULL; /* pass the UnicodeEncodeError */ PyBytes_AsStringAndSize(tmp_obj, &s, &size); encoding = pg_encoding_utf8; } else { PyErr_SetString( PyExc_TypeError, "Function cast_hstore() expects a string as first argument"); return NULL; } ret = cast_hstore(s, size, encoding); Py_XDECREF(tmp_obj); return ret; } /* The list of functions defined in the module */ static struct PyMethodDef pg_methods[] = { {"connect", (PyCFunction)pg_connect, METH_VARARGS | METH_KEYWORDS, pg_connect__doc__}, {"escape_string", (PyCFunction)pg_escape_string, METH_O, pg_escape_string__doc__}, {"escape_bytea", (PyCFunction)pg_escape_bytea, METH_O, pg_escape_bytea__doc__}, {"unescape_bytea", (PyCFunction)pg_unescape_bytea, METH_O, pg_unescape_bytea__doc__}, {"get_datestyle", (PyCFunction)pg_get_datestyle, METH_NOARGS, pg_get_datestyle__doc__}, {"set_datestyle", (PyCFunction)pg_set_datestyle, METH_VARARGS, pg_set_datestyle__doc__}, {"get_decimal_point", (PyCFunction)pg_get_decimal_point, METH_NOARGS, pg_get_decimal_point__doc__}, {"set_decimal_point", (PyCFunction)pg_set_decimal_point, METH_VARARGS, pg_set_decimal_point__doc__}, {"get_decimal", (PyCFunction)pg_get_decimal, METH_NOARGS, pg_get_decimal__doc__}, {"set_decimal", (PyCFunction)pg_set_decimal, METH_O, pg_set_decimal__doc__}, {"get_bool", (PyCFunction)pg_get_bool, METH_NOARGS, pg_get_bool__doc__}, {"set_bool", (PyCFunction)pg_set_bool, METH_VARARGS, pg_set_bool__doc__}, {"get_array", (PyCFunction)pg_get_array, METH_NOARGS, pg_get_array__doc__}, {"set_array", (PyCFunction)pg_set_array, METH_VARARGS, pg_set_array__doc__}, {"set_query_helpers", (PyCFunction)pg_set_query_helpers, METH_VARARGS, pg_set_query_helpers__doc__}, {"get_bytea_escaped", (PyCFunction)pg_get_bytea_escaped, METH_NOARGS, pg_get_bytea_escaped__doc__}, {"set_bytea_escaped", (PyCFunction)pg_set_bytea_escaped, METH_VARARGS, pg_set_bytea_escaped__doc__}, {"get_jsondecode", (PyCFunction)pg_get_jsondecode, METH_NOARGS, pg_get_jsondecode__doc__}, {"set_jsondecode", (PyCFunction)pg_set_jsondecode, METH_O, pg_set_jsondecode__doc__}, {"cast_array", (PyCFunction)pg_cast_array, METH_VARARGS | METH_KEYWORDS, pg_cast_array__doc__}, {"cast_record", (PyCFunction)pg_cast_record, METH_VARARGS | METH_KEYWORDS, pg_cast_record__doc__}, {"cast_hstore", (PyCFunction)pg_cast_hstore, METH_O, pg_cast_hstore__doc__}, {"get_defhost", pg_get_defhost, METH_NOARGS, pg_get_defhost__doc__}, {"set_defhost", pg_set_defhost, METH_VARARGS, pg_set_defhost__doc__}, {"get_defbase", pg_get_defbase, METH_NOARGS, pg_get_defbase__doc__}, {"set_defbase", pg_set_defbase, METH_VARARGS, pg_set_defbase__doc__}, {"get_defopt", pg_get_defopt, METH_NOARGS, pg_get_defopt__doc__}, {"set_defopt", pg_set_defopt, METH_VARARGS, pg_set_defopt__doc__}, {"get_defport", pg_get_defport, METH_NOARGS, pg_get_defport__doc__}, {"set_defport", pg_set_defport, METH_VARARGS, pg_set_defport__doc__}, {"get_defuser", pg_get_defuser, METH_NOARGS, pg_get_defuser__doc__}, {"set_defuser", pg_set_defuser, METH_VARARGS, pg_set_defuser__doc__}, {"set_defpasswd", pg_set_defpasswd, METH_VARARGS, pg_set_defpasswd__doc__}, {"get_pqlib_version",
(PyCFunction)pg_get_pqlib_version, METH_NOARGS, pg_get_pqlib_version__doc__}, {NULL, NULL} /* sentinel */ }; static char pg__doc__[] = "Python interface to PostgreSQL DB"; static struct PyModuleDef moduleDef = { PyModuleDef_HEAD_INIT, "_pg", /* m_name */ pg__doc__, /* m_doc */ -1, /* m_size */ pg_methods /* m_methods */ }; /* Initialization function for the module */ PyMODINIT_FUNC PyInit__pg(void); PyMODINIT_FUNC PyInit__pg(void) { PyObject *mod, *dict, *s; /* Create the module and add the functions */ mod = PyModule_Create(&moduleDef); /* Initialize here because some Windows platforms get confused otherwise */ connType.tp_base = noticeType.tp_base = queryType.tp_base = sourceType.tp_base = &PyBaseObject_Type; largeType.tp_base = &PyBaseObject_Type; if (PyType_Ready(&connType) || PyType_Ready(¬iceType) || PyType_Ready(&queryType) || PyType_Ready(&sourceType) || PyType_Ready(&largeType)) { return NULL; } dict = PyModule_GetDict(mod); /* Exceptions as defined by DB-API 2.0 */ Error = PyErr_NewException("pg.Error", PyExc_Exception, NULL); PyDict_SetItemString(dict, "Error", Error); Warning = PyErr_NewException("pg.Warning", PyExc_Exception, NULL); PyDict_SetItemString(dict, "Warning", Warning); InterfaceError = PyErr_NewException("pg.InterfaceError", Error, NULL); PyDict_SetItemString(dict, "InterfaceError", InterfaceError); DatabaseError = PyErr_NewException("pg.DatabaseError", Error, NULL); PyDict_SetItemString(dict, "DatabaseError", DatabaseError); InternalError = PyErr_NewException("pg.InternalError", DatabaseError, NULL); PyDict_SetItemString(dict, "InternalError", InternalError); OperationalError = PyErr_NewException("pg.OperationalError", DatabaseError, NULL); PyDict_SetItemString(dict, "OperationalError", OperationalError); ProgrammingError = PyErr_NewException("pg.ProgrammingError", DatabaseError, NULL); PyDict_SetItemString(dict, "ProgrammingError", ProgrammingError); IntegrityError = PyErr_NewException("pg.IntegrityError", DatabaseError, NULL); PyDict_SetItemString(dict, "IntegrityError", IntegrityError); DataError = PyErr_NewException("pg.DataError", DatabaseError, NULL); PyDict_SetItemString(dict, "DataError", DataError); NotSupportedError = PyErr_NewException("pg.NotSupportedError", DatabaseError, NULL); PyDict_SetItemString(dict, "NotSupportedError", NotSupportedError); InvalidResultError = PyErr_NewException("pg.InvalidResultError", DataError, NULL); PyDict_SetItemString(dict, "InvalidResultError", InvalidResultError); NoResultError = PyErr_NewException("pg.NoResultError", InvalidResultError, NULL); PyDict_SetItemString(dict, "NoResultError", NoResultError); MultipleResultsError = PyErr_NewException("pg.MultipleResultsError", InvalidResultError, NULL); PyDict_SetItemString(dict, "MultipleResultsError", MultipleResultsError); /* Types */ Connection = (PyObject *)&connType; PyDict_SetItemString(dict, "Connection", Connection); Query = (PyObject *)&queryType; PyDict_SetItemString(dict, "Query", Query); LargeObject = (PyObject *)&largeType; PyDict_SetItemString(dict, "LargeObject", LargeObject); /* Make the version available */ s = PyUnicode_FromString(PyPgVersion); PyDict_SetItemString(dict, "version", s); PyDict_SetItemString(dict, "__version__", s); Py_DECREF(s); /* Result types for queries */ PyDict_SetItemString(dict, "RESULT_EMPTY", PyLong_FromLong(RESULT_EMPTY)); PyDict_SetItemString(dict, "RESULT_DML", PyLong_FromLong(RESULT_DML)); PyDict_SetItemString(dict, "RESULT_DDL", PyLong_FromLong(RESULT_DDL)); PyDict_SetItemString(dict, "RESULT_DQL", PyLong_FromLong(RESULT_DQL)); 
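/* Example (editor's sketch): the exceptions and result-type constants
   registered above become plain module attributes on the Python side.

       >>> import pg
       >>> issubclass(pg.NoResultError, pg.InvalidResultError)
       True
       >>> isinstance(pg.RESULT_DQL, int)
       True
*/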
/* Transaction states */ PyDict_SetItemString(dict, "TRANS_IDLE", PyLong_FromLong(PQTRANS_IDLE)); PyDict_SetItemString(dict, "TRANS_ACTIVE", PyLong_FromLong(PQTRANS_ACTIVE)); PyDict_SetItemString(dict, "TRANS_INTRANS", PyLong_FromLong(PQTRANS_INTRANS)); PyDict_SetItemString(dict, "TRANS_INERROR", PyLong_FromLong(PQTRANS_INERROR)); PyDict_SetItemString(dict, "TRANS_UNKNOWN", PyLong_FromLong(PQTRANS_UNKNOWN)); /* Polling results */ PyDict_SetItemString(dict, "POLLING_OK", PyLong_FromLong(PGRES_POLLING_OK)); PyDict_SetItemString(dict, "POLLING_FAILED", PyLong_FromLong(PGRES_POLLING_FAILED)); PyDict_SetItemString(dict, "POLLING_READING", PyLong_FromLong(PGRES_POLLING_READING)); PyDict_SetItemString(dict, "POLLING_WRITING", PyLong_FromLong(PGRES_POLLING_WRITING)); /* Create mode for large objects */ PyDict_SetItemString(dict, "INV_READ", PyLong_FromLong(INV_READ)); PyDict_SetItemString(dict, "INV_WRITE", PyLong_FromLong(INV_WRITE)); /* Position flags for lo_lseek */ PyDict_SetItemString(dict, "SEEK_SET", PyLong_FromLong(SEEK_SET)); PyDict_SetItemString(dict, "SEEK_CUR", PyLong_FromLong(SEEK_CUR)); PyDict_SetItemString(dict, "SEEK_END", PyLong_FromLong(SEEK_END)); /* Prepare default values */ Py_INCREF(Py_None); pg_default_host = Py_None; Py_INCREF(Py_None); pg_default_base = Py_None; Py_INCREF(Py_None); pg_default_opt = Py_None; Py_INCREF(Py_None); pg_default_port = Py_None; Py_INCREF(Py_None); pg_default_user = Py_None; Py_INCREF(Py_None); pg_default_passwd = Py_None; /* Store common pg encoding ids */ pg_encoding_utf8 = pg_char_to_encoding("UTF8"); pg_encoding_latin1 = pg_char_to_encoding("LATIN1"); pg_encoding_ascii = pg_char_to_encoding("SQL_ASCII"); /* Check for errors */ if (PyErr_Occurred()) { return NULL; } return mod; } PyGreSQL-PyGreSQL-166b135/ext/pgnotice.c000066400000000000000000000075501450706350600175660ustar00rootroot00000000000000/* * PyGreSQL - a Python interface for the PostgreSQL database. * * The notice object - this file is a part of the C extension module. * * Copyright (c) 2023 by the PyGreSQL Development Team * * Please see the LICENSE.TXT file for specific restrictions. */ /* Get notice object attributes. */ static PyObject * notice_getattr(noticeObject *self, PyObject *nameobj) { PGresult const *res = self->res; const char *name = PyUnicode_AsUTF8(nameobj); int fieldcode; if (!res) { PyErr_SetString(PyExc_TypeError, "Cannot get current notice"); return NULL; } /* pg connection object */ if (!strcmp(name, "pgcnx")) { if (self->pgcnx && _check_cnx_obj(self->pgcnx)) { Py_INCREF(self->pgcnx); return (PyObject *)self->pgcnx; } else { Py_INCREF(Py_None); return Py_None; } } /* full message */ if (!strcmp(name, "message")) { return PyUnicode_FromString(PQresultErrorMessage(res)); } /* other possible fields */ fieldcode = 0; if (!strcmp(name, "severity")) fieldcode = PG_DIAG_SEVERITY; else if (!strcmp(name, "primary")) fieldcode = PG_DIAG_MESSAGE_PRIMARY; else if (!strcmp(name, "detail")) fieldcode = PG_DIAG_MESSAGE_DETAIL; else if (!strcmp(name, "hint")) fieldcode = PG_DIAG_MESSAGE_HINT; if (fieldcode) { char *s = PQresultErrorField(res, fieldcode); if (s) { return PyUnicode_FromString(s); } else { Py_INCREF(Py_None); return Py_None; } } return PyObject_GenericGetAttr((PyObject *)self, nameobj); } /* Get the list of notice attributes.
*/ static PyObject * notice_dir(noticeObject *self, PyObject *noargs) { PyObject *attrs; attrs = PyObject_Dir(PyObject_Type((PyObject *)self)); PyObject_CallMethod(attrs, "extend", "[ssssss]", "pgcnx", "severity", "message", "primary", "detail", "hint"); return attrs; } /* Return notice as string in human readable form. */ static PyObject * notice_str(noticeObject *self) { PyObject *nameobj = PyUnicode_FromString("message"); PyObject *ret = nameobj ? notice_getattr(self, nameobj) : NULL; Py_XDECREF(nameobj); return ret; } /* Notice object methods */ static struct PyMethodDef notice_methods[] = { {"__dir__", (PyCFunction)notice_dir, METH_NOARGS, NULL}, {NULL, NULL}}; static char notice__doc__[] = "PostgreSQL notice object"; /* Notice type definition */ static PyTypeObject noticeType = { PyVarObject_HEAD_INIT(NULL, 0) "pg.Notice", /* tp_name */ sizeof(noticeObject), /* tp_basicsize */ 0, /* tp_itemsize */ /* methods */ 0, /* tp_dealloc */ 0, /* tp_print */ 0, /* tp_getattr */ 0, /* tp_setattr */ 0, /* tp_compare */ 0, /* tp_repr */ 0, /* tp_as_number */ 0, /* tp_as_sequence */ 0, /* tp_as_mapping */ 0, /* tp_hash */ 0, /* tp_call */ (reprfunc)notice_str, /* tp_str */ (getattrofunc)notice_getattr, /* tp_getattro */ PyObject_GenericSetAttr, /* tp_setattro */ 0, /* tp_as_buffer */ Py_TPFLAGS_DEFAULT, /* tp_flags */ notice__doc__, /* tp_doc */ 0, /* tp_traverse */ 0, /* tp_clear */ 0, /* tp_richcompare */ 0, /* tp_weaklistoffset */ 0, /* tp_iter */ 0, /* tp_iternext */ notice_methods, /* tp_methods */ }; PyGreSQL-PyGreSQL-166b135/ext/pgquery.c000066400000000000000000000740571450706350600174540ustar00rootroot00000000000000/* * PyGreSQL - a Python interface for the PostgreSQL database. * * The query object - this file is a part of the C extension module. * * Copyright (c) 2023 by the PyGreSQL Development Team * * Please see the LICENSE.TXT file for specific restrictions. */ /* Deallocate the query object. */ static void query_dealloc(queryObject *self) { Py_XDECREF(self->pgcnx); if (self->col_types) { PyMem_Free(self->col_types); } if (self->result) { PQclear(self->result); } PyObject_Del(self); } /* Return query as string in human readable form. */ static PyObject * query_str(queryObject *self) { return format_result(self->result); } /* Return length of a query object. */ static Py_ssize_t query_len(PyObject *self) { PyObject *tmp; Py_ssize_t len; tmp = PyLong_FromLong(((queryObject *)self)->max_row); len = PyLong_AsSsize_t(tmp); Py_DECREF(tmp); return len; } /* Return the value in the given column of the current row. */ static PyObject * _query_value_in_column(queryObject *self, int column) { char *s; int type; if (PQgetisnull(self->result, self->current_row, column)) { Py_INCREF(Py_None); return Py_None; } /* get the string representation of the value */ /* note: this is always null-terminated text format */ s = PQgetvalue(self->result, self->current_row, column); /* get the PyGreSQL type of the column */ type = self->col_types[column]; /* cast the string representation into a Python object */ if (type & PYGRES_ARRAY) return cast_array(s, PQgetlength(self->result, self->current_row, column), self->encoding, type, NULL, 0); if (type == PYGRES_BYTEA) return cast_bytea_text(s); if (type == PYGRES_OTHER) return cast_other(s, PQgetlength(self->result, self->current_row, column), self->encoding, PQftype(self->result, column), self->pgcnx->cast_hook); if (type & PYGRES_TEXT) return cast_sized_text( s, PQgetlength(self->result, self->current_row, column), self->encoding, type); return cast_unsized_simple(s, type); }
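/* Example (editor's sketch): the per-column casting above is what turns raw
   result strings into Python values. Assuming an open connection `con`:

       >>> q = con.query("select 123 as n, true as b, '{1,2}'::int[] as a")
       >>> q.getresult()
       [(123, True, [1, 2])]
*/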
/* Return the current row as a tuple. */ static PyObject * _query_row_as_tuple(queryObject *self) { PyObject *row_tuple = NULL; int j; if (!(row_tuple = PyTuple_New(self->num_fields))) { return NULL; } for (j = 0; j < self->num_fields; ++j) { PyObject *val = _query_value_in_column(self, j); if (!val) { Py_DECREF(row_tuple); return NULL; } PyTuple_SET_ITEM(row_tuple, j, val); } return row_tuple; } /* Fetch the result if this is an asynchronous query and it has not yet been fetched in this round-trip. Also mark whether the result should be kept for this round-trip (e.g. to be used in an iterator). If this is a normal query result, the query itself will be returned, otherwise a result value will be returned that shall be passed on. */ static PyObject * _get_async_result(queryObject *self, int keep) { int fetch = 0; if (self->async) { if (self->async == 1) { fetch = 1; if (keep) { /* mark query as fetched, do not fetch again */ self->async = 2; } } else if (!keep) { self->async = 1; } } if (fetch) { int status; if (!self->pgcnx) { PyErr_SetString(PyExc_TypeError, "Connection is not valid"); return NULL; } Py_BEGIN_ALLOW_THREADS if (self->result) { PQclear(self->result); } self->result = PQgetResult(self->pgcnx->cnx); Py_END_ALLOW_THREADS if (!self->result) { /* end of result set, return None */ self->max_row = 0; self->num_fields = 0; self->col_types = NULL; Py_INCREF(Py_None); return Py_None; } if ((status = PQresultStatus(self->result)) != PGRES_TUPLES_OK) { PyObject *result = _conn_non_query_result(status, self->result, self->pgcnx->cnx); self->result = NULL; /* since this has been already cleared */ if (!result) { /* Raise an error. We need to call PQgetResult() to clear the connection state. This should return NULL the first time. */ self->result = PQgetResult(self->pgcnx->cnx); while (self->result) { PQclear(self->result); self->result = PQgetResult(self->pgcnx->cnx); } /* invalidate the connection only after draining all results */ Py_DECREF(self->pgcnx); self->pgcnx = NULL; } else if (result == Py_None) { /* It would be confusing to return None here because the caller has to call again until we return None. We can't just consume that final None because we don't know if there are additional statements following this one, so we return an empty string where query() would return None. */ Py_DECREF(result); result = PyUnicode_FromString(""); } return result; } self->max_row = PQntuples(self->result); self->num_fields = PQnfields(self->result); self->col_types = get_col_types(self->result, self->num_fields); if (!self->col_types) { Py_DECREF(self); return NULL; } } else if (self->async == 2 && !self->max_row && !self->num_fields && !self->col_types) { Py_INCREF(Py_None); return Py_None; } /* return the query object itself as sentinel for a normal query result */ return (PyObject *)self; } /* Return given item from a query object. */ static PyObject * query_getitem(PyObject *self, Py_ssize_t i) { queryObject *q = (queryObject *)self; PyObject *tmp; long row; if ((tmp = _get_async_result(q, 0)) != (PyObject *)self) return tmp; tmp = PyLong_FromSize_t((size_t)i); row = PyLong_AsLong(tmp); Py_DECREF(tmp); if (row < 0 || row >= q->max_row) { PyErr_SetNone(PyExc_IndexError); return NULL; } q->current_row = (int)row; return _query_row_as_tuple(q); }
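/* Example (editor's sketch): thanks to the length and item protocols above,
   a query object behaves like a sequence of row tuples. Assuming `con`:

       >>> q = con.query("select generate_series(1, 3) as n")
       >>> len(q)
       3
       >>> q[0]
       (1,)
       >>> list(q)
       [(1,), (2,), (3,)]
*/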
/* __iter__() method of the queryObject: Returns the default iterator yielding rows as tuples. */ static PyObject * query_iter(queryObject *self) { PyObject *res; if ((res = _get_async_result(self, 0)) != (PyObject *)self) return res; self->current_row = 0; Py_INCREF(self); return (PyObject *)self; } /* __next__() method of the queryObject: Returns the current row as a tuple and moves to the next one. */ static PyObject * query_next(queryObject *self, PyObject *noargs) { PyObject *row_tuple = NULL; if (self->current_row >= self->max_row) { PyErr_SetNone(PyExc_StopIteration); return NULL; } row_tuple = _query_row_as_tuple(self); if (row_tuple) ++self->current_row; return row_tuple; } /* Get number of bytes allocated for PGresult object */ static char query_memsize__doc__[] = "memsize() -- return number of bytes allocated by query result"; static PyObject * query_memsize(queryObject *self, PyObject *noargs) { #ifdef MEMORY_SIZE return PyLong_FromSize_t(PQresultMemorySize(self->result)); #else set_error_msg(NotSupportedError, "Memory size functions not supported"); return NULL; #endif /* MEMORY_SIZE */ } /* List field names from query result. */ static char query_listfields__doc__[] = "listfields() -- List field names from result"; static PyObject * query_listfields(queryObject *self, PyObject *noargs) { int i; char *name; PyObject *fieldstuple, *str; /* builds tuple */ fieldstuple = PyTuple_New(self->num_fields); if (fieldstuple) { for (i = 0; i < self->num_fields; ++i) { name = PQfname(self->result, i); str = PyUnicode_FromString(name); PyTuple_SET_ITEM(fieldstuple, i, str); } } return fieldstuple; } /* Get field name from number in last result. */ static char query_fieldname__doc__[] = "fieldname(num) -- return name of field from result from its position"; static PyObject * query_fieldname(queryObject *self, PyObject *args) { int i; char *name; /* gets args */ if (!PyArg_ParseTuple(args, "i", &i)) { PyErr_SetString(PyExc_TypeError, "Method fieldname() takes an integer as argument"); return NULL; } /* checks number validity */ if (i < 0 || i >= self->num_fields) { PyErr_SetString(PyExc_ValueError, "Invalid field number"); return NULL; } /* gets fields name and builds object */ name = PQfname(self->result, i); return PyUnicode_FromString(name); } /* Get field number from name in last result. */ static char query_fieldnum__doc__[] = "fieldnum(name) -- return position in query for field from its name"; static PyObject * query_fieldnum(queryObject *self, PyObject *args) { int num; char *name; /* gets args */ if (!PyArg_ParseTuple(args, "s", &name)) { PyErr_SetString(PyExc_TypeError, "Method fieldnum() takes a string as argument"); return NULL; } /* gets field number */ if ((num = PQfnumber(self->result, name)) == -1) { PyErr_SetString(PyExc_ValueError, "Unknown field"); return NULL; } return PyLong_FromLong(num); } /* Build a tuple with info for query field with given number. */ static PyObject * _query_build_field_info(PGresult *res, int col_num) { PyObject *info; info = PyTuple_New(4); if (info) { PyTuple_SET_ITEM(info, 0, PyUnicode_FromString(PQfname(res, col_num))); PyTuple_SET_ITEM(info, 1, PyLong_FromLong((long)PQftype(res, col_num))); PyTuple_SET_ITEM(info, 2, PyLong_FromLong(PQfsize(res, col_num))); PyTuple_SET_ITEM(info, 3, PyLong_FromLong(PQfmod(res, col_num))); } return info; } /* Get information on one or all fields of the query result.
*/ static char query_fieldinfo__doc__[] = "fieldinfo([name]) -- return information about field(s) in query result"; static PyObject * query_fieldinfo(queryObject *self, PyObject *args) { PyObject *result, *field = NULL; int num; /* gets args */ if (!PyArg_ParseTuple(args, "|O", &field)) { PyErr_SetString(PyExc_TypeError, "Method fieldinfo() takes one optional argument only"); return NULL; } /* check optional field arg */ if (field) { /* gets field number */ if (PyBytes_Check(field)) { num = PQfnumber(self->result, PyBytes_AsString(field)); } else if (PyUnicode_Check(field)) { PyObject *tmp = get_encoded_string(field, self->encoding); if (!tmp) return NULL; num = PQfnumber(self->result, PyBytes_AsString(tmp)); Py_DECREF(tmp); } else if (PyLong_Check(field)) { num = (int)PyLong_AsLong(field); } else { PyErr_SetString(PyExc_TypeError, "Field should be given as column number or name"); return NULL; } if (num < 0 || num >= self->num_fields) { PyErr_SetString(PyExc_IndexError, "Unknown field"); return NULL; } return _query_build_field_info(self->result, num); } if (!(result = PyTuple_New(self->num_fields))) { return NULL; } for (num = 0; num < self->num_fields; ++num) { PyObject *info = _query_build_field_info(self->result, num); if (!info) { Py_DECREF(result); return NULL; } PyTuple_SET_ITEM(result, num, info); } return result; } /* Retrieve one row from the result as a tuple. */ static char query_one__doc__[] = "one() -- Get one row from the result of a query\n\n" "Only one row from the result is returned as a tuple of fields.\n" "This method can be called multiple times to return more rows.\n" "It returns None if the result does not contain one more row.\n"; static PyObject * query_one(queryObject *self, PyObject *noargs) { PyObject *row_tuple; if ((row_tuple = _get_async_result(self, 0)) == (PyObject *)self) { if (self->current_row >= self->max_row) { Py_INCREF(Py_None); return Py_None; } row_tuple = _query_row_as_tuple(self); if (row_tuple) ++self->current_row; } return row_tuple; } /* Retrieve the single row from the result as a tuple. */ static char query_single__doc__[] = "single() -- Get the result of a query as single row\n\n" "The single row from the query result is returned as a tuple of fields.\n" "This method returns the same single row when called multiple times.\n" "It raises an InvalidResultError if the result doesn't have exactly one " "row,\n" "which will be of type NoResultError or MultipleResultsError " "specifically.\n"; static PyObject * query_single(queryObject *self, PyObject *noargs) { PyObject *row_tuple; if ((row_tuple = _get_async_result(self, 0)) == (PyObject *)self) { if (self->max_row != 1) { if (self->max_row) set_error_msg(MultipleResultsError, "Multiple results found"); else set_error_msg(NoResultError, "No result found"); return NULL; } self->current_row = 0; row_tuple = _query_row_as_tuple(self); if (row_tuple) ++self->current_row; } return row_tuple; } /* Retrieve the last query result as a list of tuples. 
*/ static char query_getresult__doc__[] = "getresult() -- Get the result of a query\n\n" "The result is returned as a list of rows, each one a tuple of fields\n" "in the order returned by the server.\n"; static PyObject * query_getresult(queryObject *self, PyObject *noargs) { PyObject *result_list; int i; if ((result_list = _get_async_result(self, 0)) == (PyObject *)self) { if (!(result_list = PyList_New(self->max_row))) { return NULL; } for (i = self->current_row = 0; i < self->max_row; ++i) { PyObject *row_tuple = query_next(self, noargs); if (!row_tuple) { Py_DECREF(result_list); return NULL; } PyList_SET_ITEM(result_list, i, row_tuple); } } return result_list; } /* Return the current row as a dict. */ static PyObject * _query_row_as_dict(queryObject *self) { PyObject *row_dict = NULL; int j; if (!(row_dict = PyDict_New())) { return NULL; } for (j = 0; j < self->num_fields; ++j) { PyObject *val = _query_value_in_column(self, j); if (!val) { Py_DECREF(row_dict); return NULL; } PyDict_SetItemString(row_dict, PQfname(self->result, j), val); Py_DECREF(val); } return row_dict; } /* Return the current row as a dict and move to the next one. */ static PyObject * query_next_dict(queryObject *self, PyObject *noargs) { PyObject *row_dict = NULL; if (self->current_row >= self->max_row) { PyErr_SetNone(PyExc_StopIteration); return NULL; } row_dict = _query_row_as_dict(self); if (row_dict) ++self->current_row; return row_dict; } /* Retrieve one row from the result as a dictionary. */ static char query_onedict__doc__[] = "onedict() -- Get one row from the result of a query\n\n" "Only one row from the result is returned as a dictionary with\n" "the field names used as the keys.\n" "This method can be called multiple times to return more rows.\n" "It returns None if the result does not contain one more row.\n"; static PyObject * query_onedict(queryObject *self, PyObject *noargs) { PyObject *row_dict; if ((row_dict = _get_async_result(self, 0)) == (PyObject *)self) { if (self->current_row >= self->max_row) { Py_INCREF(Py_None); return Py_None; } row_dict = _query_row_as_dict(self); if (row_dict) ++self->current_row; } return row_dict; } /* Retrieve the single row from the result as a dictionary. */ static char query_singledict__doc__[] = "singledict() -- Get the result of a query as single row\n\n" "The single row from the query result is returned as a dictionary with\n" "the field names used as the keys.\n" "This method returns the same single row when called multiple times.\n" "It raises an InvalidResultError if the result doesn't have exactly one " "row,\n" "which will be of type NoResultError or MultipleResultsError " "specifically.\n"; static PyObject * query_singledict(queryObject *self, PyObject *noargs) { PyObject *row_dict; if ((row_dict = _get_async_result(self, 0)) == (PyObject *)self) { if (self->max_row != 1) { if (self->max_row) set_error_msg(MultipleResultsError, "Multiple results found"); else set_error_msg(NoResultError, "No result found"); return NULL; } self->current_row = 0; row_dict = _query_row_as_dict(self); if (row_dict) ++self->current_row; } return row_dict; } /* Retrieve the last query result as a list of dictionaries. 
*/ static char query_dictresult__doc__[] = "dictresult() -- Get the result of a query\n\n" "The result is returned as a list of rows, each one a dictionary with\n" "the field names used as the keys.\n"; static PyObject * query_dictresult(queryObject *self, PyObject *noargs) { PyObject *result_list; int i; if ((result_list = _get_async_result(self, 0)) == (PyObject *)self) { if (!(result_list = PyList_New(self->max_row))) { return NULL; } for (i = self->current_row = 0; i < self->max_row; ++i) { PyObject *row_dict = query_next_dict(self, noargs); if (!row_dict) { Py_DECREF(result_list); return NULL; } PyList_SET_ITEM(result_list, i, row_dict); } } return result_list; } /* Retrieve last result as iterator of dictionaries. */ static char query_dictiter__doc__[] = "dictiter() -- Get the result of a query\n\n" "The result is returned as an iterator of rows, each one a dictionary\n" "with the field names used as the keys.\n"; static PyObject * query_dictiter(queryObject *self, PyObject *noargs) { PyObject *res; if (!dictiter) { return query_dictresult(self, noargs); } if ((res = _get_async_result(self, 1)) != (PyObject *)self) return res; return PyObject_CallFunction(dictiter, "(O)", self); } /* Retrieve one row from the result as a named tuple. */ static char query_onenamed__doc__[] = "onenamed() -- Get one row from the result of a query\n\n" "Only one row from the result is returned as a named tuple of fields.\n" "This method can be called multiple times to return more rows.\n" "It returns None if the result does not contain one more row.\n"; static PyObject * query_onenamed(queryObject *self, PyObject *noargs) { PyObject *res; if (!namednext) { return query_one(self, noargs); } if ((res = _get_async_result(self, 1)) != (PyObject *)self) return res; if (self->current_row >= self->max_row) { Py_INCREF(Py_None); return Py_None; } return PyObject_CallFunction(namednext, "(O)", self); } /* Retrieve the single row from the result as a named tuple. */ static char query_singlenamed__doc__[] = "singlenamed() -- Get the result of a query as single row\n\n" "The single row from the query result is returned as named tuple of " "fields.\n" "This method returns the same single row when called multiple times.\n" "It raises an InvalidResultError if the result doesn't have exactly one " "row,\n" "which will be of type NoResultError or MultipleResultsError " "specifically.\n"; static PyObject * query_singlenamed(queryObject *self, PyObject *noargs) { PyObject *res; if (!namednext) { return query_single(self, noargs); } if ((res = _get_async_result(self, 1)) != (PyObject *)self) return res; if (self->max_row != 1) { if (self->max_row) set_error_msg(MultipleResultsError, "Multiple results found"); else set_error_msg(NoResultError, "No result found"); return NULL; } self->current_row = 0; return PyObject_CallFunction(namednext, "(O)", self); }
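/* Example (editor's sketch): the row-at-a-time accessors defined above,
   assuming an open connection `con`:

       >>> q = con.query("select 1 as n union select 2 order by n")
       >>> q.onedict()
       {'n': 1}
       >>> q.onedict()          # advances to the next row
       {'n': 2}
       >>> q.onedict() is None  # result is exhausted
       True
       >>> con.query("select 42 as n").singledict()
       {'n': 42}
*/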
/* Retrieve last result as list of named tuples. */ static char query_namedresult__doc__[] = "namedresult() -- Get the result of a query\n\n" "The result is returned as a list of rows, each one a named tuple of " "fields\n" "in the order returned by the server.\n"; static PyObject * query_namedresult(queryObject *self, PyObject *noargs) { PyObject *res, *res_list; if (!namediter) { return query_getresult(self, noargs); } if ((res_list = _get_async_result(self, 1)) == (PyObject *)self) { res = PyObject_CallFunction(namediter, "(O)", self); if (!res) return NULL; if (PyList_Check(res)) return res; res_list = PySequence_List(res); Py_DECREF(res); } return res_list; } /* Retrieve last result as iterator of named tuples. */ static char query_namediter__doc__[] = "namediter() -- Get the result of a query\n\n" "The result is returned as an iterator of rows, each one a named tuple\n" "of fields in the order returned by the server.\n"; static PyObject * query_namediter(queryObject *self, PyObject *noargs) { PyObject *res, *res_iter; if (!namediter) { return query_iter(self); } if ((res_iter = _get_async_result(self, 1)) == (PyObject *)self) { res = PyObject_CallFunction(namediter, "(O)", self); if (!res) return NULL; if (!PyList_Check(res)) return res; res_iter = PyObject_GetIter(res); Py_DECREF(res); } return res_iter; } /* Retrieve the last query result as a list of scalar values. */ static char query_scalarresult__doc__[] = "scalarresult() -- Get query result as scalars\n\n" "The result is returned as a list of scalar values where the values\n" "are the first fields of the rows in the order returned by the server.\n"; static PyObject * query_scalarresult(queryObject *self, PyObject *noargs) { PyObject *result_list; if ((result_list = _get_async_result(self, 0)) == (PyObject *)self) { if (!self->num_fields) { set_error_msg(ProgrammingError, "No fields in result"); return NULL; } if (!(result_list = PyList_New(self->max_row))) { return NULL; } for (self->current_row = 0; self->current_row < self->max_row; ++self->current_row) { PyObject *value = _query_value_in_column(self, 0); if (!value) { Py_DECREF(result_list); return NULL; } PyList_SET_ITEM(result_list, self->current_row, value); } } return result_list; } /* Retrieve the last query result as iterator of scalar values. */ static char query_scalariter__doc__[] = "scalariter() -- Get query result as scalars\n\n" "The result is returned as an iterator of scalar values where the values\n" "are the first fields of the rows in the order returned by the server.\n"; static PyObject * query_scalariter(queryObject *self, PyObject *noargs) { PyObject *res; if (!scalariter) { return query_scalarresult(self, noargs); } if ((res = _get_async_result(self, 1)) != (PyObject *)self) return res; if (!self->num_fields) { set_error_msg(ProgrammingError, "No fields in result"); return NULL; } return PyObject_CallFunction(scalariter, "(O)", self); }
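/* Example (editor's sketch): the scalar accessors return only the first
   column of each row, which is handy for counts and ids. Assuming `con`:

       >>> con.query("select generate_series(1, 3)").scalarresult()
       [1, 2, 3]
       >>> con.query("select 42").onescalar()
       42
*/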
/* Retrieve one result as scalar value. */ static char query_onescalar__doc__[] = "onescalar() -- Get one scalar value from the result of a query\n\n" "Returns the first field of the next row from the result as a scalar " "value.\n" "This method can be called multiple times to return more rows as " "scalars.\n" "It returns None if the result does not contain one more row.\n"; static PyObject * query_onescalar(queryObject *self, PyObject *noargs) { PyObject *value; if ((value = _get_async_result(self, 0)) == (PyObject *)self) { if (!self->num_fields) { set_error_msg(ProgrammingError, "No fields in result"); return NULL; } if (self->current_row >= self->max_row) { Py_INCREF(Py_None); return Py_None; } value = _query_value_in_column(self, 0); if (value) ++self->current_row; } return value; } /* Retrieve the first field of the single result row as a scalar value. */ static char query_singlescalar__doc__[] = "singlescalar() -- Get scalar value from single result of a query\n\n" "Returns the first field of the next row from the result as a scalar " "value.\n" "This method returns the same single row when called multiple times.\n" "It raises an InvalidResultError if the result doesn't have exactly one " "row,\n" "which will be of type NoResultError or MultipleResultsError " "specifically.\n"; static PyObject * query_singlescalar(queryObject *self, PyObject *noargs) { PyObject *value; if ((value = _get_async_result(self, 0)) == (PyObject *)self) { if (!self->num_fields) { set_error_msg(ProgrammingError, "No fields in result"); return NULL; } if (self->max_row != 1) { if (self->max_row) set_error_msg(MultipleResultsError, "Multiple results found"); else set_error_msg(NoResultError, "No result found"); return NULL; } self->current_row = 0; value = _query_value_in_column(self, 0); if (value) ++self->current_row; } return value; } /* Query sequence protocol methods */ static PySequenceMethods query_sequence_methods = { (lenfunc)query_len, /* sq_length */ 0, /* sq_concat */ 0, /* sq_repeat */ (ssizeargfunc)query_getitem, /* sq_item */ 0, /* sq_ass_item */ 0, /* sq_contains */ 0, /* sq_inplace_concat */ 0, /* sq_inplace_repeat */ }; /* Query object methods */ static struct PyMethodDef query_methods[] = { {"getresult", (PyCFunction)query_getresult, METH_NOARGS, query_getresult__doc__}, {"dictresult", (PyCFunction)query_dictresult, METH_NOARGS, query_dictresult__doc__}, {"dictiter", (PyCFunction)query_dictiter, METH_NOARGS, query_dictiter__doc__}, {"namedresult", (PyCFunction)query_namedresult, METH_NOARGS, query_namedresult__doc__}, {"namediter", (PyCFunction)query_namediter, METH_NOARGS, query_namediter__doc__}, {"one", (PyCFunction)query_one, METH_NOARGS, query_one__doc__}, {"single", (PyCFunction)query_single, METH_NOARGS, query_single__doc__}, {"onedict", (PyCFunction)query_onedict, METH_NOARGS, query_onedict__doc__}, {"singledict", (PyCFunction)query_singledict, METH_NOARGS, query_singledict__doc__}, {"onenamed", (PyCFunction)query_onenamed, METH_NOARGS, query_onenamed__doc__}, {"singlenamed", (PyCFunction)query_singlenamed, METH_NOARGS, query_singlenamed__doc__}, {"scalarresult", (PyCFunction)query_scalarresult, METH_NOARGS, query_scalarresult__doc__}, {"scalariter", (PyCFunction)query_scalariter, METH_NOARGS, query_scalariter__doc__}, {"onescalar", (PyCFunction)query_onescalar, METH_NOARGS, query_onescalar__doc__}, {"singlescalar", (PyCFunction)query_singlescalar, METH_NOARGS, query_singlescalar__doc__}, {"fieldname", (PyCFunction)query_fieldname, METH_VARARGS, query_fieldname__doc__}, {"fieldnum", (PyCFunction)query_fieldnum, METH_VARARGS,
query_fieldnum__doc__}, {"listfields", (PyCFunction)query_listfields, METH_NOARGS, query_listfields__doc__}, {"fieldinfo", (PyCFunction)query_fieldinfo, METH_VARARGS, query_fieldinfo__doc__}, {"memsize", (PyCFunction)query_memsize, METH_NOARGS, query_memsize__doc__}, {NULL, NULL}}; static char query__doc__[] = "PyGreSQL query object"; /* Query type definition */ static PyTypeObject queryType = { PyVarObject_HEAD_INIT(NULL, 0) "pg.Query", /* tp_name */ sizeof(queryObject), /* tp_basicsize */ 0, /* tp_itemsize */ /* methods */ (destructor)query_dealloc, /* tp_dealloc */ 0, /* tp_print */ 0, /* tp_getattr */ 0, /* tp_setattr */ 0, /* tp_compare */ 0, /* tp_repr */ 0, /* tp_as_number */ &query_sequence_methods, /* tp_as_sequence */ 0, /* tp_as_mapping */ 0, /* tp_hash */ 0, /* tp_call */ (reprfunc)query_str, /* tp_str */ PyObject_GenericGetAttr, /* tp_getattro */ 0, /* tp_setattro */ 0, /* tp_as_buffer */ Py_TPFLAGS_DEFAULT, /* tp_flags */ query__doc__, /* tp_doc */ 0, /* tp_traverse */ 0, /* tp_clear */ 0, /* tp_richcompare */ 0, /* tp_weaklistoffset */ (getiterfunc)query_iter, /* tp_iter */ (iternextfunc)query_next, /* tp_iternext */ query_methods, /* tp_methods */ }; PyGreSQL-PyGreSQL-166b135/ext/pgsource.c000066400000000000000000000570771450706350600176140ustar00rootroot00000000000000/* * PyGreSQL - a Python interface for the PostgreSQL database. * * The source object - this file is a part of the C extension module. * * Copyright (c) 2023 by the PyGreSQL Development Team * * Please see the LICENSE.TXT file for specific restrictions. */ /* Deallocate source object. */ static void source_dealloc(sourceObject *self) { if (self->result) PQclear(self->result); Py_XDECREF(self->pgcnx); PyObject_Del(self); } /* Return source object as string in human readable form. */ static PyObject * source_str(sourceObject *self) { switch (self->result_type) { case RESULT_DQL: return format_result(self->result); case RESULT_DDL: case RESULT_DML: return PyUnicode_FromString(PQcmdStatus(self->result)); case RESULT_EMPTY: default: return PyUnicode_FromString("(empty PostgreSQL source object)"); } } /* Check source object validity. */ static int _check_source_obj(sourceObject *self, int level) { if (!self->valid) { set_error_msg(OperationalError, "Object has been closed"); return 0; } if ((level & CHECK_RESULT) && !self->result) { set_error_msg(DatabaseError, "No result"); return 0; } if ((level & CHECK_DQL) && self->result_type != RESULT_DQL) { set_error_msg(DatabaseError, "Last query did not return tuples"); return 0; } if ((level & CHECK_CNX) && !_check_cnx_obj(self->pgcnx)) { return 0; } return 1; } /* Get source object attributes. */ static PyObject * source_getattr(sourceObject *self, PyObject *nameobj) { const char *name = PyUnicode_AsUTF8(nameobj); /* pg connection object */ if (!strcmp(name, "pgcnx")) { if (_check_source_obj(self, 0)) { Py_INCREF(self->pgcnx); return (PyObject *)(self->pgcnx); } Py_INCREF(Py_None); return Py_None; } /* arraysize */ if (!strcmp(name, "arraysize")) return PyLong_FromLong(self->arraysize); /* resulttype */ if (!strcmp(name, "resulttype")) return PyLong_FromLong(self->result_type); /* ntuples */ if (!strcmp(name, "ntuples")) return PyLong_FromLong(self->max_row); /* nfields */ if (!strcmp(name, "nfields")) return PyLong_FromLong(self->num_fields); /* seeks name in methods (fallback) */ return PyObject_GenericGetAttr((PyObject *)self, nameobj); } /* Set source object attributes.
*/ static int source_setattr(sourceObject *self, char *name, PyObject *v) { /* arraysize */ if (!strcmp(name, "arraysize")) { if (!PyLong_Check(v)) { PyErr_SetString(PyExc_TypeError, "arraysize must be integer"); return -1; } self->arraysize = PyLong_AsLong(v); return 0; } /* unknown attribute */ PyErr_SetString(PyExc_TypeError, "Not a writable attribute"); return -1; } /* Close object. */ static char source_close__doc__[] = "close() -- close source object without deleting it\n\n" "All instances of the source object can no longer be used after this " "call.\n"; static PyObject * source_close(sourceObject *self, PyObject *noargs) { /* frees result if necessary and invalidates object */ if (self->result) { PQclear(self->result); self->result_type = RESULT_EMPTY; self->result = NULL; } self->valid = 0; /* return None */ Py_INCREF(Py_None); return Py_None; } /* Database query. */ static char source_execute__doc__[] = "execute(sql) -- execute a SQL statement (string)\n\n" "On success, this call returns the number of affected rows, or None\n" "for DQL (SELECT, ...) statements. The fetch (fetch(), fetchone()\n" "and fetchall()) methods can be used to get result rows.\n"; static PyObject * source_execute(sourceObject *self, PyObject *sql) { PyObject *tmp_obj = NULL; /* auxiliary string object */ char *query; int encoding; /* checks validity */ if (!_check_source_obj(self, CHECK_CNX)) { return NULL; } encoding = PQclientEncoding(self->pgcnx->cnx); if (PyBytes_Check(sql)) { query = PyBytes_AsString(sql); } else if (PyUnicode_Check(sql)) { tmp_obj = get_encoded_string(sql, encoding); if (!tmp_obj) return NULL; /* pass the UnicodeEncodeError */ query = PyBytes_AsString(tmp_obj); } else { PyErr_SetString(PyExc_TypeError, "Method execute() expects a string as argument"); return NULL; } /* frees previous result */ if (self->result) { PQclear(self->result); self->result = NULL; } self->max_row = 0; self->current_row = 0; self->num_fields = 0; self->encoding = encoding; /* gets result */ Py_BEGIN_ALLOW_THREADS self->result = PQexec(self->pgcnx->cnx, query); Py_END_ALLOW_THREADS /* we don't need the auxiliary string any more */ Py_XDECREF(tmp_obj); /* checks result validity */ if (!self->result) { PyErr_SetString(PyExc_ValueError, PQerrorMessage(self->pgcnx->cnx)); return NULL; } /* this may have changed the datestyle, so we reset the date format in order to force fetching it newly when next time requested */ self->pgcnx->date_format = date_format; /* this is normally NULL */ /* checks result status */ switch (PQresultStatus(self->result)) { /* query succeeded */ case PGRES_TUPLES_OK: /* DQL: returns None (DB-SIG compliant) */ self->result_type = RESULT_DQL; self->max_row = PQntuples(self->result); self->num_fields = PQnfields(self->result); Py_INCREF(Py_None); return Py_None; case PGRES_COMMAND_OK: /* other requests */ case PGRES_COPY_OUT: case PGRES_COPY_IN: { long num_rows; char *tmp; tmp = PQcmdTuples(self->result); if (tmp[0]) { self->result_type = RESULT_DML; num_rows = atol(tmp); } else { self->result_type = RESULT_DDL; num_rows = -1; } return PyLong_FromLong(num_rows); } /* query failed */ case PGRES_EMPTY_QUERY: PyErr_SetString(PyExc_ValueError, "Empty query"); break; case PGRES_BAD_RESPONSE: case PGRES_FATAL_ERROR: case PGRES_NONFATAL_ERROR: set_error(ProgrammingError, "Cannot execute command", self->pgcnx->cnx, self->result); break; default: set_error_msg(InternalError, "Internal error: unknown result status"); } /* frees result and returns error */ PQclear(self->result); self->result = NULL; 
self->result_type = RESULT_EMPTY; return NULL; } /* Get oid status for last query (valid for INSERTs, 0 for other). */ static char source_oidstatus__doc__[] = "oidstatus() -- return oid of last inserted row (if available)"; static PyObject * source_oidstatus(sourceObject *self, PyObject *noargs) { Oid oid; /* checks validity */ if (!_check_source_obj(self, CHECK_RESULT)) { return NULL; } /* retrieves oid status */ if ((oid = PQoidValue(self->result)) == InvalidOid) { Py_INCREF(Py_None); return Py_None; } return PyLong_FromLong((long)oid); } /* Fetch rows from last result. */ static char source_fetch__doc__[] = "fetch(num) -- return the next num rows from the last result in a list\n\n" "If num parameter is omitted arraysize attribute value is used.\n" "If size equals -1, all rows are fetched.\n"; static PyObject * source_fetch(sourceObject *self, PyObject *args) { PyObject *res_list; int i, k; long size; int encoding; /* checks validity */ if (!_check_source_obj(self, CHECK_RESULT | CHECK_DQL | CHECK_CNX)) { return NULL; } /* checks args */ size = self->arraysize; if (!PyArg_ParseTuple(args, "|l", &size)) { PyErr_SetString(PyExc_TypeError, "fetch(num), with num (integer, optional)"); return NULL; } /* seeks last line */ /* limit size to be within the amount of data we actually have */ if (size == -1 || (self->max_row - self->current_row) < size) { size = self->max_row - self->current_row; } /* allocate list for result */ if (!(res_list = PyList_New(0))) return NULL; encoding = self->encoding; /* builds result */ for (i = 0, k = self->current_row; i < size; ++i, ++k) { PyObject *rowtuple; int j; if (!(rowtuple = PyTuple_New(self->num_fields))) { Py_DECREF(res_list); return NULL; } for (j = 0; j < self->num_fields; ++j) { PyObject *str; if (PQgetisnull(self->result, k, j)) { Py_INCREF(Py_None); str = Py_None; } else { char *s = PQgetvalue(self->result, k, j); Py_ssize_t size = PQgetlength(self->result, k, j); if (PQfformat(self->result, j) == 0) { /* textual format */ str = get_decoded_string(s, size, encoding); if (!str) /* cannot decode */ str = PyBytes_FromStringAndSize(s, size); } else { str = PyBytes_FromStringAndSize(s, size); } } PyTuple_SET_ITEM(rowtuple, j, str); } if (PyList_Append(res_list, rowtuple)) { Py_DECREF(rowtuple); Py_DECREF(res_list); return NULL; } Py_DECREF(rowtuple); } self->current_row = k; return res_list; } /* Change current row (internal wrapper for all "move" methods). */ static PyObject * _source_move(sourceObject *self, int move) { /* checks validity */ if (!_check_source_obj(self, CHECK_RESULT | CHECK_DQL)) { return NULL; } /* changes the current row */ switch (move) { case QUERY_MOVEFIRST: self->current_row = 0; break; case QUERY_MOVELAST: self->current_row = self->max_row - 1; break; case QUERY_MOVENEXT: if (self->current_row != self->max_row) ++self->current_row; break; case QUERY_MOVEPREV: if (self->current_row > 0) self->current_row--; break; } Py_INCREF(Py_None); return Py_None; } /* Move to first result row. */ static char source_movefirst__doc__[] = "movefirst() -- move to first result row"; static PyObject * source_movefirst(sourceObject *self, PyObject *noargs) { return _source_move(self, QUERY_MOVEFIRST); } /* Move to last result row. */ static char source_movelast__doc__[] = "movelast() -- move to last valid result row"; static PyObject * source_movelast(sourceObject *self, PyObject *noargs) { return _source_move(self, QUERY_MOVELAST); } /* Move to next result row. 
*/ static char source_movenext__doc__[] = "movenext() -- move to next result row"; static PyObject * source_movenext(sourceObject *self, PyObject *noargs) { return _source_move(self, QUERY_MOVENEXT); } /* Move to previous result row. */ static char source_moveprev__doc__[] = "moveprev() -- move to previous result row"; static PyObject * source_moveprev(sourceObject *self, PyObject *noargs) { return _source_move(self, QUERY_MOVEPREV); } /* Put copy data. */ static char source_putdata__doc__[] = "putdata(buffer) -- send data to server during copy from stdin"; static PyObject * source_putdata(sourceObject *self, PyObject *buffer) { PyObject *tmp_obj = NULL; /* an auxiliary object */ char *buf; /* the buffer as encoded string */ Py_ssize_t nbytes; /* length of string */ char *errormsg = NULL; /* error message */ int res; /* direct result of the operation */ PyObject *ret; /* return value */ /* checks validity */ if (!_check_source_obj(self, CHECK_CNX)) { return NULL; } /* make sure that the connection object is valid */ if (!self->pgcnx->cnx) { return NULL; } if (buffer == Py_None) { /* pass None for terminating the operation */ buf = errormsg = NULL; } else if (PyBytes_Check(buffer)) { /* or pass a byte string */ PyBytes_AsStringAndSize(buffer, &buf, &nbytes); } else if (PyUnicode_Check(buffer)) { /* or pass a unicode string */ tmp_obj = get_encoded_string(buffer, PQclientEncoding(self->pgcnx->cnx)); if (!tmp_obj) return NULL; /* pass the UnicodeEncodeError */ PyBytes_AsStringAndSize(tmp_obj, &buf, &nbytes); } else if (PyErr_GivenExceptionMatches(buffer, PyExc_BaseException)) { /* or pass a Python exception for sending an error message */ tmp_obj = PyObject_Str(buffer); if (PyUnicode_Check(tmp_obj)) { PyObject *obj = tmp_obj; tmp_obj = get_encoded_string(obj, PQclientEncoding(self->pgcnx->cnx)); Py_DECREF(obj); if (!tmp_obj) return NULL; /* pass the UnicodeEncodeError */ } errormsg = PyBytes_AsString(tmp_obj); buf = NULL; } else { PyErr_SetString(PyExc_TypeError, "Method putdata() expects a buffer, None" " or an exception as argument"); return NULL; } /* checks validity */ if (!_check_source_obj(self, CHECK_CNX | CHECK_RESULT) || PQresultStatus(self->result) != PGRES_COPY_IN) { PyErr_SetString(PyExc_IOError, "Connection is invalid or not in copy_in state"); Py_XDECREF(tmp_obj); return NULL; } if (buf) { res = nbytes ? PQputCopyData(self->pgcnx->cnx, buf, (int)nbytes) : 1; } else { res = PQputCopyEnd(self->pgcnx->cnx, errormsg); } Py_XDECREF(tmp_obj); if (res != 1) { PyErr_SetString(PyExc_IOError, PQerrorMessage(self->pgcnx->cnx)); return NULL; } if (buf) { /* buffer has been sent */ ret = Py_None; Py_INCREF(ret); } else { /* copy is done */ PGresult *result; /* final result of the operation */ Py_BEGIN_ALLOW_THREADS; result = PQgetResult(self->pgcnx->cnx); Py_END_ALLOW_THREADS; if (PQresultStatus(result) == PGRES_COMMAND_OK) { char *tmp; long num_rows; tmp = PQcmdTuples(result); num_rows = tmp[0] ? atol(tmp) : -1; ret = PyLong_FromLong(num_rows); } else { if (!errormsg) errormsg = PQerrorMessage(self->pgcnx->cnx); PyErr_SetString(PyExc_IOError, errormsg); ret = NULL; } PQclear(self->result); self->result = NULL; self->result_type = RESULT_EMPTY; } return ret; /* None or number of rows */ } /* Get copy data. 
*/ static char source_getdata__doc__[] = "getdata(decode) -- receive data from server during copy to stdout"; static PyObject * source_getdata(sourceObject *self, PyObject *args) { int decode = 0; /* decode flag */ char *buffer; /* the copied buffer as encoded byte string */ Py_ssize_t nbytes; /* length of the byte string */ PyObject *ret; /* return value */ /* checks validity */ if (!_check_source_obj(self, CHECK_CNX)) { return NULL; } /* make sure that the connection object is valid */ if (!self->pgcnx->cnx) { return NULL; } if (!PyArg_ParseTuple(args, "|i", &decode)) { return NULL; } /* checks validity */ if (!_check_source_obj(self, CHECK_CNX | CHECK_RESULT) || PQresultStatus(self->result) != PGRES_COPY_OUT) { PyErr_SetString(PyExc_IOError, "Connection is invalid or not in copy_out state"); return NULL; } nbytes = PQgetCopyData(self->pgcnx->cnx, &buffer, 0); if (!nbytes || nbytes < -1) { /* an error occurred */ PyErr_SetString(PyExc_IOError, PQerrorMessage(self->pgcnx->cnx)); return NULL; } if (nbytes == -1) { /* copy is done */ PGresult *result; /* final result of the operation */ Py_BEGIN_ALLOW_THREADS; result = PQgetResult(self->pgcnx->cnx); Py_END_ALLOW_THREADS; if (PQresultStatus(result) == PGRES_COMMAND_OK) { char *tmp; long num_rows; tmp = PQcmdTuples(result); num_rows = tmp[0] ? atol(tmp) : -1; ret = PyLong_FromLong(num_rows); } else { PyErr_SetString(PyExc_IOError, PQerrorMessage(self->pgcnx->cnx)); ret = NULL; } PQclear(self->result); self->result = NULL; self->result_type = RESULT_EMPTY; } else { /* a row has been returned */ ret = decode ? get_decoded_string(buffer, nbytes, PQclientEncoding(self->pgcnx->cnx)) : PyBytes_FromStringAndSize(buffer, nbytes); PQfreemem(buffer); } return ret; /* buffer or number of rows */ } /* Find field number from string/integer (internal use only). */ static int _source_fieldindex(sourceObject *self, PyObject *param, const char *usage) { int num; /* checks validity */ if (!_check_source_obj(self, CHECK_RESULT | CHECK_DQL)) return -1; /* gets field number */ if (PyUnicode_Check(param)) { num = PQfnumber(self->result, PyUnicode_AsUTF8(param)); } else if (PyLong_Check(param)) { num = (int)PyLong_AsLong(param); } else { PyErr_SetString(PyExc_TypeError, usage); return -1; } /* checks field validity */ if (num < 0 || num >= self->num_fields) { PyErr_SetString(PyExc_ValueError, "Unknown field"); return -1; } return num; } /* Build field information from position (internal use only). */ static PyObject * _source_buildinfo(sourceObject *self, int num) { PyObject *result; /* allocates tuple */ result = PyTuple_New(5); if (!result) { return NULL; } /* affects field information */ PyTuple_SET_ITEM(result, 0, PyLong_FromLong(num)); PyTuple_SET_ITEM(result, 1, PyUnicode_FromString(PQfname(self->result, num))); PyTuple_SET_ITEM(result, 2, PyLong_FromLong((long)PQftype(self->result, num))); PyTuple_SET_ITEM(result, 3, PyLong_FromLong(PQfsize(self->result, num))); PyTuple_SET_ITEM(result, 4, PyLong_FromLong(PQfmod(self->result, num))); return result; } /* Lists fields info.
 */
static char source_listinfo__doc__[] =
    "listinfo() -- get information for all fields"
    " (position, name, type oid, size, type modifier)";

static PyObject *
source_listinfo(sourceObject *self, PyObject *noargs)
{
    PyObject *result, *info;
    int i;

    /* checks validity */
    if (!_check_source_obj(self, CHECK_RESULT | CHECK_DQL)) {
        return NULL;
    }

    /* builds result */
    if (!(result = PyTuple_New(self->num_fields))) {
        return NULL;
    }

    for (i = 0; i < self->num_fields; ++i) {
        info = _source_buildinfo(self, i);
        if (!info) {
            Py_DECREF(result);
            return NULL;
        }
        PyTuple_SET_ITEM(result, i, info);
    }

    /* returns result */
    return result;
}

/* Get information for one field of the last result. */
static char source_fieldinfo__doc__[] =
    "fieldinfo(desc) -- get specified field info"
    " (position, name, type oid, size, type modifier)";

static PyObject *
source_fieldinfo(sourceObject *self, PyObject *desc)
{
    int num;

    /* checks args and validity */
    if ((num = _source_fieldindex(
             self, desc,
             "Method fieldinfo() needs a string or integer as argument")) ==
        -1)
    {
        return NULL;
    }

    /* returns result */
    return _source_buildinfo(self, num);
}

/* Retrieve field value. */
static char source_field__doc__[] =
    "field(desc) -- return specified field value";

static PyObject *
source_field(sourceObject *self, PyObject *desc)
{
    int num;

    /* checks args and validity */
    if ((num = _source_fieldindex(
             self, desc,
             "Method field() needs a string or integer as argument")) == -1)
    {
        return NULL;
    }

    return PyUnicode_FromString(
        PQgetvalue(self->result, self->current_row, num));
}

/* Get the list of source object attributes. */
static PyObject *
source_dir(sourceObject *self, PyObject *noargs)
{
    PyObject *attrs;

    attrs = PyObject_Dir(PyObject_Type((PyObject *)self));
    PyObject_CallMethod(attrs, "extend", "[sssss]", "pgcnx", "arraysize",
                        "resulttype", "ntuples", "nfields");

    return attrs;
}

/* Source object methods */
static PyMethodDef source_methods[] = {
    {"__dir__", (PyCFunction)source_dir, METH_NOARGS, NULL},
    {"close", (PyCFunction)source_close, METH_NOARGS, source_close__doc__},
    {"execute", (PyCFunction)source_execute, METH_O, source_execute__doc__},
    {"oidstatus", (PyCFunction)source_oidstatus, METH_NOARGS,
     source_oidstatus__doc__},
    {"fetch", (PyCFunction)source_fetch, METH_VARARGS, source_fetch__doc__},
    {"movefirst", (PyCFunction)source_movefirst, METH_NOARGS,
     source_movefirst__doc__},
    {"movelast", (PyCFunction)source_movelast, METH_NOARGS,
     source_movelast__doc__},
    {"movenext", (PyCFunction)source_movenext, METH_NOARGS,
     source_movenext__doc__},
    {"moveprev", (PyCFunction)source_moveprev, METH_NOARGS,
     source_moveprev__doc__},
    {"putdata", (PyCFunction)source_putdata, METH_O, source_putdata__doc__},
    {"getdata", (PyCFunction)source_getdata, METH_VARARGS,
     source_getdata__doc__},
    {"field", (PyCFunction)source_field, METH_O, source_field__doc__},
    {"fieldinfo", (PyCFunction)source_fieldinfo, METH_O,
     source_fieldinfo__doc__},
    {"listinfo", (PyCFunction)source_listinfo, METH_NOARGS,
     source_listinfo__doc__},
    {NULL, NULL}};

static char source__doc__[] = "PyGreSQL source object";

/* Source type definition */
static PyTypeObject sourceType = {
    PyVarObject_HEAD_INIT(NULL, 0) "pgdb.Source", /* tp_name */
    sizeof(sourceObject),                         /* tp_basicsize */
    0,                                            /* tp_itemsize */
    /* methods */
    (destructor)source_dealloc,                   /* tp_dealloc */
    0,                                            /* tp_print */
    0,                                            /* tp_getattr */
    (setattrfunc)source_setattr,                  /* tp_setattr */
    0,                                            /* tp_compare */
    0,                                            /* tp_repr */
    0,                                            /* tp_as_number */
    0,                                            /* tp_as_sequence */
    0,                                            /* tp_as_mapping */
    0,                                            /* tp_hash */
    0,                                            /* tp_call */
    (reprfunc)source_str,                         /* tp_str */
(getattrofunc)source_getattr, /* tp_getattro */ 0, /* tp_setattro */ 0, /* tp_as_buffer */ Py_TPFLAGS_DEFAULT, /* tp_flags */ source__doc__, /* tp_doc */ 0, /* tp_traverse */ 0, /* tp_clear */ 0, /* tp_richcompare */ 0, /* tp_weaklistoffset */ 0, /* tp_iter */ 0, /* tp_iternext */ source_methods, /* tp_methods */ }; PyGreSQL-PyGreSQL-166b135/ext/pgtypes.h000066400000000000000000000107161450706350600174540ustar00rootroot00000000000000/* pgtypes - PostgreSQL type definitions These are the standard PostgreSQL 11.1 built-in types, extracted from src/backend/catalog/pg_type_d.h, because that header file is sometimes not available or needs other header files to get properly included. You can also query pg_type to get this information. */ #ifndef PG_TYPE_D_H #define BOOLOID 16 #define BYTEAOID 17 #define CHAROID 18 #define NAMEOID 19 #define INT8OID 20 #define INT2OID 21 #define INT2VECTOROID 22 #define INT4OID 23 #define REGPROCOID 24 #define TEXTOID 25 #define OIDOID 26 #define TIDOID 27 #define XIDOID 28 #define CIDOID 29 #define OIDVECTOROID 30 #define JSONOID 114 #define XMLOID 142 #define XMLARRAYOID 143 #define JSONARRAYOID 199 #define PGNODETREEOID 194 #define PGNDISTINCTOID 3361 #define PGDEPENDENCIESOID 3402 #define PGDDLCOMMANDOID 32 #define SMGROID 210 #define POINTOID 600 #define LSEGOID 601 #define PATHOID 602 #define BOXOID 603 #define POLYGONOID 604 #define LINEOID 628 #define LINEARRAYOID 629 #define FLOAT4OID 700 #define FLOAT8OID 701 #define ABSTIMEOID 702 #define RELTIMEOID 703 #define TINTERVALOID 704 #define UNKNOWNOID 705 #define CIRCLEOID 718 #define CIRCLEARRAYOID 719 #define CASHOID 790 #define MONEYARRAYOID 791 #define MACADDROID 829 #define INETOID 869 #define CIDROID 650 #define MACADDR8OID 774 #define BOOLARRAYOID 1000 #define BYTEAARRAYOID 1001 #define CHARARRAYOID 1002 #define NAMEARRAYOID 1003 #define INT2ARRAYOID 1005 #define INT2VECTORARRAYOID 1006 #define INT4ARRAYOID 1007 #define REGPROCARRAYOID 1008 #define TEXTARRAYOID 1009 #define OIDARRAYOID 1028 #define TIDARRAYOID 1010 #define XIDARRAYOID 1011 #define CIDARRAYOID 1012 #define OIDVECTORARRAYOID 1013 #define BPCHARARRAYOID 1014 #define VARCHARARRAYOID 1015 #define INT8ARRAYOID 1016 #define POINTARRAYOID 1017 #define LSEGARRAYOID 1018 #define PATHARRAYOID 1019 #define BOXARRAYOID 1020 #define FLOAT4ARRAYOID 1021 #define FLOAT8ARRAYOID 1022 #define ABSTIMEARRAYOID 1023 #define RELTIMEARRAYOID 1024 #define TINTERVALARRAYOID 1025 #define POLYGONARRAYOID 1027 #define ACLITEMOID 1033 #define ACLITEMARRAYOID 1034 #define MACADDRARRAYOID 1040 #define MACADDR8ARRAYOID 775 #define INETARRAYOID 1041 #define CIDRARRAYOID 651 #define CSTRINGARRAYOID 1263 #define BPCHAROID 1042 #define VARCHAROID 1043 #define DATEOID 1082 #define TIMEOID 1083 #define TIMESTAMPOID 1114 #define TIMESTAMPARRAYOID 1115 #define DATEARRAYOID 1182 #define TIMEARRAYOID 1183 #define TIMESTAMPTZOID 1184 #define TIMESTAMPTZARRAYOID 1185 #define INTERVALOID 1186 #define INTERVALARRAYOID 1187 #define NUMERICARRAYOID 1231 #define TIMETZOID 1266 #define TIMETZARRAYOID 1270 #define BITOID 1560 #define BITARRAYOID 1561 #define VARBITOID 1562 #define VARBITARRAYOID 1563 #define NUMERICOID 1700 #define REFCURSOROID 1790 #define REFCURSORARRAYOID 2201 #define REGPROCEDUREOID 2202 #define REGOPEROID 2203 #define REGOPERATOROID 2204 #define REGCLASSOID 2205 #define REGTYPEOID 2206 #define REGROLEOID 4096 #define REGNAMESPACEOID 4089 #define REGPROCEDUREARRAYOID 2207 #define REGOPERARRAYOID 2208 #define REGOPERATORARRAYOID 2209 #define REGCLASSARRAYOID 2210 
#define REGTYPEARRAYOID 2211 #define REGROLEARRAYOID 4097 #define REGNAMESPACEARRAYOID 4090 #define UUIDOID 2950 #define UUIDARRAYOID 2951 #define LSNOID 3220 #define PG_LSNARRAYOID 3221 #define TSVECTOROID 3614 #define GTSVECTOROID 3642 #define TSQUERYOID 3615 #define REGCONFIGOID 3734 #define REGDICTIONARYOID 3769 #define TSVECTORARRAYOID 3643 #define GTSVECTORARRAYOID 3644 #define TSQUERYARRAYOID 3645 #define REGCONFIGARRAYOID 3735 #define REGDICTIONARYARRAYOID 3770 #define JSONBOID 3802 #define JSONBARRAYOID 3807 #define TXID_SNAPSHOTOID 2970 #define TXID_SNAPSHOTARRAYOID 2949 #define INT4RANGEOID 3904 #define INT4RANGEARRAYOID 3905 #define NUMRANGEOID 3906 #define NUMRANGEARRAYOID 3907 #define TSRANGEOID 3908 #define TSRANGEARRAYOID 3909 #define TSTZRANGEOID 3910 #define TSTZRANGEARRAYOID 3911 #define DATERANGEOID 3912 #define DATERANGEARRAYOID 3913 #define INT8RANGEOID 3926 #define INT8RANGEARRAYOID 3927 #define RECORDOID 2249 #define RECORDARRAYOID 2287 #define CSTRINGOID 2275 #define ANYOID 2276 #define ANYARRAYOID 2277 #define VOIDOID 2278 #define TRIGGEROID 2279 #define EVTTRIGGEROID 3838 #define LANGUAGE_HANDLEROID 2280 #define INTERNALOID 2281 #define OPAQUEOID 2282 #define ANYELEMENTOID 2283 #define ANYNONARRAYOID 2776 #define ANYENUMOID 3500 #define FDW_HANDLEROID 3115 #define INDEX_AM_HANDLEROID 325 #define TSM_HANDLEROID 3310 #define ANYRANGEOID 3831 #endif /* PG_TYPE_D_H */ PyGreSQL-PyGreSQL-166b135/pg/000077500000000000000000000000001450706350600154115ustar00rootroot00000000000000PyGreSQL-PyGreSQL-166b135/pg/__init__.py000066400000000000000000000070121450706350600175220ustar00rootroot00000000000000#!/usr/bin/python # # PyGreSQL - a Python interface for the PostgreSQL database. # # This file contains the classic pg module. # # Copyright (c) 2023 by the PyGreSQL Development Team # # The notification handler is based on pgnotify which is # Copyright (c) 2001 Ng Pheng Siong. All rights reserved. # # Please see the LICENSE.TXT file for specific restrictions. """PyGreSQL classic interface. This pg module implements some basic database management stuff. It includes the _pg module and builds on it, providing the higher level wrapper class named DB with additional functionality. This is known as the "classic" ("old style") PyGreSQL interface. For a DB-API 2 compliant interface use the newer pgdb module. 
""" from __future__ import annotations from .adapt import Adapter, Bytea, Hstore, Json, Literal from .cast import Typecasts, get_typecast, set_typecast from .core import ( INV_READ, INV_WRITE, POLLING_FAILED, POLLING_OK, POLLING_READING, POLLING_WRITING, RESULT_DDL, RESULT_DML, RESULT_DQL, RESULT_EMPTY, SEEK_CUR, SEEK_END, SEEK_SET, TRANS_ACTIVE, TRANS_IDLE, TRANS_INERROR, TRANS_INTRANS, TRANS_UNKNOWN, Connection, DatabaseError, DataError, Error, IntegrityError, InterfaceError, InternalError, InvalidResultError, MultipleResultsError, NoResultError, NotSupportedError, OperationalError, ProgrammingError, Query, Warning, cast_array, cast_hstore, cast_record, connect, escape_bytea, escape_string, get_array, get_bool, get_bytea_escaped, get_datestyle, get_decimal, get_decimal_point, get_defbase, get_defhost, get_defopt, get_defport, get_defuser, get_jsondecode, get_pqlib_version, set_array, set_bool, set_bytea_escaped, set_datestyle, set_decimal, set_decimal_point, set_defbase, set_defhost, set_defopt, set_defpasswd, set_defport, set_defuser, set_jsondecode, set_query_helpers, unescape_bytea, version, ) from .db import DB from .helpers import RowCache, init_core from .notify import NotificationHandler __all__ = [ 'DB', 'Adapter', 'NotificationHandler', 'Typecasts', 'Bytea', 'Hstore', 'Json', 'Literal', 'Error', 'Warning', 'DataError', 'DatabaseError', 'IntegrityError', 'InterfaceError', 'InternalError', 'InvalidResultError', 'MultipleResultsError', 'NoResultError', 'NotSupportedError', 'OperationalError', 'ProgrammingError', 'Connection', 'Query', 'RowCache', 'INV_READ', 'INV_WRITE', 'POLLING_OK', 'POLLING_FAILED', 'POLLING_READING', 'POLLING_WRITING', 'RESULT_DDL', 'RESULT_DML', 'RESULT_DQL', 'RESULT_EMPTY', 'SEEK_CUR', 'SEEK_END', 'SEEK_SET', 'TRANS_ACTIVE', 'TRANS_IDLE', 'TRANS_INERROR', 'TRANS_INTRANS', 'TRANS_UNKNOWN', 'cast_array', 'cast_hstore', 'cast_record', 'connect', 'escape_bytea', 'escape_string', 'unescape_bytea', 'get_array', 'get_bool', 'get_bytea_escaped', 'get_datestyle', 'get_decimal', 'get_decimal_point', 'get_defbase', 'get_defhost', 'get_defopt', 'get_defport', 'get_defuser', 'get_jsondecode', 'get_pqlib_version', 'get_typecast', 'set_array', 'set_bool', 'set_bytea_escaped', 'set_datestyle', 'set_decimal', 'set_decimal_point', 'set_defbase', 'set_defhost', 'set_defopt', 'set_defpasswd', 'set_defport', 'set_defuser', 'set_jsondecode', 'set_query_helpers', 'set_typecast', 'version', '__version__', ] __version__ = version init_core() PyGreSQL-PyGreSQL-166b135/pg/_pg.pyi000066400000000000000000000404461450706350600167110ustar00rootroot00000000000000"""Type hints for the PyGreSQL C extension.""" from __future__ import annotations from typing import Any, Callable, Iterable, Sequence, TypeVar try: AnyStr = TypeVar('AnyStr', str, bytes, str | bytes) except TypeError: # Python < 3.10 AnyStr = Any # type: ignore SomeNamedTuple = Any # alias for accessing arbitrary named tuples version: str __version__: str RESULT_EMPTY: int RESULT_DML: int RESULT_DDL: int RESULT_DQL: int TRANS_IDLE: int TRANS_ACTIVE: int TRANS_INTRANS: int TRANS_INERROR: int TRANS_UNKNOWN: int POLLING_OK: int POLLING_FAILED: int POLLING_READING: int POLLING_WRITING: int INV_READ: int INV_WRITE: int SEEK_SET: int SEEK_CUR: int SEEK_END: int class Error(Exception): """Exception that is the base class of all other error exceptions.""" class Warning(Exception): # noqa: N818 """Exception raised for important warnings.""" class InterfaceError(Error): """Exception raised for errors related to the database interface.""" 
class DatabaseError(Error):
    """Exception raised for errors that are related to the database."""

    sqlstate: str | None

class InternalError(DatabaseError):
    """Exception raised when the database encounters an internal error."""

class OperationalError(DatabaseError):
    """Exception raised for errors related to the operation of the database."""

class ProgrammingError(DatabaseError):
    """Exception raised for programming errors."""

class IntegrityError(DatabaseError):
    """Exception raised when the relational integrity is affected."""

class DataError(DatabaseError):
    """Exception raised for errors due to problems with the processed data."""

class NotSupportedError(DatabaseError):
    """Exception raised when a method or database API is not supported."""

class InvalidResultError(DataError):
    """Exception when a database operation produced an invalid result."""

class NoResultError(InvalidResultError):
    """Exception when a database operation did not produce any result."""

class MultipleResultsError(InvalidResultError):
    """Exception when a database operation produced multiple results."""

class Source:
    """Source object."""

    arraysize: int
    resulttype: int
    ntuples: int
    nfields: int

    def execute(self, sql: str) -> int | None:
        """Execute a SQL statement."""
        ...
    def fetch(self, num: int) -> list[tuple]:
        """Return the next num rows from the last result in a list."""
        ...
    def listinfo(self) -> tuple[tuple[int, str, int, int, int], ...]:
        """Get information for all fields."""
        ...
    def oidstatus(self) -> int | None:
        """Return oid of last inserted row (if available)."""
        ...
    def putdata(self, buffer: str | bytes | BaseException | None
                ) -> int | None:
        """Send data to server during copy from stdin."""
        ...
    def getdata(self, decode: bool | None = None) -> str | bytes | int:
        """Receive data from server during copy to stdout."""
        ...
    def close(self) -> None:
        """Close query object without deleting it."""
        ...

class LargeObject:
    """Large object."""

    oid: int
    pgcnx: Connection
    error: str

    def open(self, mode: int) -> None:
        """Open a large object.

        The valid values for 'mode' parameter are defined as the module
        level constants INV_READ and INV_WRITE.
        """
        ...
    def close(self) -> None:
        """Close a large object."""
        ...
    def read(self, size: int) -> bytes:
        """Read data from large object."""
        ...
    def write(self, data: bytes) -> None:
        """Write data to large object."""
        ...
    def seek(self, offset: int, whence: int) -> int:
        """Change current position in large object.

        The valid values for the 'whence' parameter are defined as the
        module level constants SEEK_SET, SEEK_CUR and SEEK_END.
        """
        ...
    def unlink(self) -> None:
        """Delete large object."""
        ...
    def size(self) -> int:
        """Return the large object size."""
        ...
    def export(self, filename: str) -> None:
        """Export a large object to a file."""
        ...

class Connection:
    """Connection object.

    This object handles a connection to a PostgreSQL database.
    It embeds and hides all the parameters that define this connection,
    thus just leaving really significant parameters in function calls.
    """

    host: str
    port: int
    db: str
    options: str
    error: str
    status: int
    user: str
    protocol_version: int
    server_version: int
    socket: int
    backend_pid: int
    ssl_in_use: bool
    ssl_attributes: dict[str, str | None]

    def source(self) -> Source:
        """Create a new source object for this connection."""
        ...
    def query(self, cmd: str, args: Sequence | None = None) -> Query:
        """Create a new query object for this connection.

        Note that if the command is something other than DQL, this method
        can return an int, str or None instead of a Query.
        """
        ...
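    # A sketch in comment form of how the query() stub above is typically
    # used (the table and the connection `con` are hypothetical):
    #
    #     q = con.query("select name from fruits where color = $1", ('red',))
    #     rows = q.getresult()  # list of row tuples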
def send_query(self, cmd: str, args: Sequence | None = None) -> Query: """Create a new asynchronous query object for this connection.""" ... def query_prepared(self, name: str, args: Sequence | None = None) -> Query: """Execute a prepared statement.""" ... def prepare(self, name: str, cmd: str) -> None: """Create a prepared statement.""" ... def describe_prepared(self, name: str) -> Query: """Describe a prepared statement.""" ... def poll(self) -> int: """Complete an asynchronous connection and get its state.""" ... def reset(self) -> None: """Reset the connection.""" ... def cancel(self) -> None: """Abandon processing of current SQL command.""" ... def close(self) -> None: """Close the database connection.""" ... def fileno(self) -> int: """Get the socket used to connect to the database.""" ... def get_cast_hook(self) -> Callable | None: """Get the function that handles all external typecasting.""" ... def set_cast_hook(self, hook: Callable | None) -> None: """Set a function that will handle all external typecasting.""" ... def get_notice_receiver(self) -> Callable | None: """Get the current notice receiver.""" ... def set_notice_receiver(self, receiver: Callable | None) -> None: """Set a custom notice receiver.""" ... def getnotify(self) -> tuple[str, int, str] | None: """Get the last notify from the server.""" ... def inserttable(self, table: str, values: Sequence[list|tuple], columns: list[str] | tuple[str, ...] | None = None) -> int: """Insert a Python iterable into a database table.""" ... def transaction(self) -> int: """Get the current in-transaction status of the server. The status returned by this method can be TRANS_IDLE (currently idle), TRANS_ACTIVE (a command is in progress), TRANS_INTRANS (idle, in a valid transaction block), or TRANS_INERROR (idle, in a failed transaction block). TRANS_UNKNOWN is reported if the connection is bad. The status TRANS_ACTIVE is reported only when a query has been sent to the server and not yet completed. """ ... def parameter(self, name: str) -> str | None: """Look up a current parameter setting of the server.""" ... def date_format(self) -> str: """Look up the date format currently being used by the database.""" ... def escape_literal(self, s: AnyStr) -> AnyStr: """Escape a literal constant for use within SQL.""" ... def escape_identifier(self, s: AnyStr) -> AnyStr: """Escape an identifier for use within SQL.""" ... def escape_string(self, s: AnyStr) -> AnyStr: """Escape a string for use within SQL.""" ... def escape_bytea(self, s: AnyStr) -> AnyStr: """Escape binary data for use within SQL as type 'bytea'.""" ... def putline(self, line: str) -> None: """Write a line to the server socket.""" ... def getline(self) -> str: """Get a line from server socket.""" ... def endcopy(self) -> None: """Synchronize client and server.""" ... def set_non_blocking(self, nb: bool) -> None: """Set the non-blocking mode of the connection.""" ... def is_non_blocking(self) -> bool: """Get the non-blocking mode of the connection.""" ... def locreate(self, mode: int) -> LargeObject: """Create a large object in the database. The valid values for 'mode' parameter are defined as the module level constants INV_READ and INV_WRITE. """ ... def getlo(self, oid: int) -> LargeObject: """Build a large object from given oid.""" ... def loimport(self, filename: str) -> LargeObject: """Import a file to a large object.""" ... class Query: """Query object. The Query object returned by Connection.query and DB.query can be used as an iterable returning rows as tuples. 
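    For example, a minimal sketch (the connection ``con`` and the table
    are assumed for illustration only)::

        q = con.query("select name, color from fruits")
        for row in q:  # iteration yields row tuples
            print(row)
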
    You can also directly access row tuples using their index, and
    get the number of rows with the len() function.
    The Query class also provides several other methods for accessing
    the results of the query.
    """

    def __len__(self) -> int:
        ...
    def __getitem__(self, key: int) -> object:
        ...
    def __iter__(self) -> Query:
        ...
    def __next__(self) -> tuple:
        ...

    def getresult(self) -> list[tuple]:
        """Get query values as list of tuples."""
        ...
    def dictresult(self) -> list[dict[str, object]]:
        """Get query values as list of dictionaries."""
        ...
    def dictiter(self) -> Iterable[dict[str, object]]:
        """Get query values as iterable of dictionaries."""
        ...
    def namedresult(self) -> list[SomeNamedTuple]:
        """Get query values as list of named tuples."""
        ...
    def namediter(self) -> Iterable[SomeNamedTuple]:
        """Get query values as iterable of named tuples."""
        ...
    def one(self) -> tuple | None:
        """Get one row from the result of a query as a tuple."""
        ...
    def single(self) -> tuple:
        """Get single row from the result of a query as a tuple."""
        ...
    def onedict(self) -> dict[str, object] | None:
        """Get one row from the result of a query as a dictionary."""
        ...
    def singledict(self) -> dict[str, object]:
        """Get single row from the result of a query as a dictionary."""
        ...
    def onenamed(self) -> SomeNamedTuple | None:
        """Get one row from the result of a query as named tuple."""
        ...
    def singlenamed(self) -> SomeNamedTuple:
        """Get single row from the result of a query as named tuple."""
        ...
    def scalarresult(self) -> list:
        """Get first fields from query result as list of scalar values."""
        ...
    def scalariter(self) -> Iterable:
        """Get first fields from query result as iterable of scalar values."""
        ...
    def onescalar(self) -> object | None:
        """Get one row from the result of a query as scalar value."""
        ...
    def singlescalar(self) -> object:
        """Get single row from the result of a query as scalar value."""
        ...
    def fieldname(self, num: int) -> str:
        """Get field name from its number."""
        ...
    def fieldnum(self, name: str) -> int:
        """Get field number from its name."""
        ...
    def listfields(self) -> tuple[str, ...]:
        """List field names of query result."""
        ...
    def fieldinfo(self, column: int | str | None) -> tuple[str, int, int, int]:
        """Get information on one or all fields of the query.

        The four-tuples contain the following information:
        The field name, the internal OID number of the field type,
        the size in bytes of the column or a negative value if it is
        of variable size, and a type-specific modifier value.
        """
        ...
    def memsize(self) -> int:
        """Return number of bytes allocated by query result."""
        ...

def connect(dbname: str | None = None,
            host: str | None = None,
            port: int | None = None,
            opt: str | None = None,
            user: str | None = None,
            passwd: str | None = None,
            nowait: int | None = None) -> Connection:
    """Connect to a PostgreSQL database."""
    ...

def cast_array(s: str, cast: Callable | None = None,
               delim: bytes | None = None) -> list:
    """Cast a string representing a PostgreSQL array to a Python list."""
    ...

def cast_record(s: str,
                cast: Callable | list[Callable | None]
                | tuple[Callable | None, ...] | None = None,
                delim: bytes | None = None) -> tuple:
    """Cast a string representing a PostgreSQL record to a Python tuple."""
    ...

def cast_hstore(s: str) -> dict[str, str | None]:
    """Cast a string as a hstore."""
    ...

def escape_bytea(s: AnyStr) -> AnyStr:
    """Escape binary data for use within SQL as type 'bytea'."""
    ...

def unescape_bytea(s: AnyStr) -> bytes:
    """Unescape 'bytea' data that has been retrieved as text."""
    ...
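# A sketch in comment form of the bytea helpers above (the values are
# illustrative only):
#
#     escaped = escape_bytea(b"\x00\x07")  # text form for use within SQL
#     raw = unescape_bytea(value_from_db)  # bytes from a textual 'bytea'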
def escape_string(s: AnyStr) -> AnyStr:
    """Escape a string for use within SQL."""
    ...

def get_pqlib_version() -> int:
    """Get the version of libpq that is being used by PyGreSQL."""
    ...

def get_array() -> bool:
    """Check whether arrays are returned as list objects."""
    ...

def set_array(on: bool) -> None:
    """Set whether arrays are returned as list objects."""
    ...

def get_bool() -> bool:
    """Check whether boolean values are returned as bool objects."""
    ...

def set_bool(on: bool | int) -> None:
    """Set whether boolean values are returned as bool objects."""
    ...

def get_bytea_escaped() -> bool:
    """Check whether 'bytea' values are returned as escaped strings."""
    ...

def set_bytea_escaped(on: bool | int) -> None:
    """Set whether 'bytea' values are returned as escaped strings."""
    ...

def get_datestyle() -> str | None:
    """Get the assumed date style for typecasting."""
    ...

def set_datestyle(datestyle: str | None) -> None:
    """Set a fixed date style that shall be assumed when typecasting."""
    ...

def get_decimal() -> type:
    """Get the decimal type to be used for numeric values."""
    ...

def set_decimal(cls: type) -> None:
    """Set the decimal type to be used for numeric values."""
    ...

def get_decimal_point() -> str | None:
    """Get the decimal mark used for monetary values."""
    ...

def set_decimal_point(mark: str | None) -> None:
    """Specify which decimal mark is used for interpreting monetary values."""
    ...

def get_jsondecode() -> Callable[[str], object] | None:
    """Get the function that deserializes JSON formatted strings."""
    ...

def set_jsondecode(decode: Callable[[str], object] | None) -> None:
    """Set a function that will deserialize JSON formatted strings."""
    ...

def get_defbase() -> str | None:
    """Get the default database name."""
    ...

def set_defbase(base: str | None) -> None:
    """Set the default database name."""
    ...

def get_defhost() -> str | None:
    """Get the default host."""
    ...

def set_defhost(host: str | None) -> None:
    """Set the default host."""
    ...

def get_defport() -> int | None:
    """Get the default port."""
    ...

def set_defport(port: int | None) -> None:
    """Set the default port."""
    ...

def get_defopt() -> str | None:
    """Get the default connection options."""
    ...

def set_defopt(opt: str | None) -> None:
    """Set the default connection options."""
    ...

def get_defuser() -> str | None:
    """Get the default database user."""
    ...

def set_defuser(user: str | None) -> None:
    """Set the default database user."""
    ...

def get_defpasswd() -> str | None:
    """Get the default database password."""
    ...

def set_defpasswd(passwd: str | None) -> None:
    """Set the default database password."""
    ...

def set_query_helpers(*helpers: Callable) -> None:
    """Set internal query helper functions."""
    ...
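# A configuration sketch in comment form: the get_*/set_* functions above
# control module-wide typecasting and connection defaults. For instance,
# to have numeric values returned as float instead of Decimal and to
# preset the default host (the values are illustrative):
#
#     set_decimal(float)
#     set_defhost('localhost')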
PyGreSQL-PyGreSQL-166b135/pg/adapt.py000066400000000000000000000562361450706350600170700ustar00rootroot00000000000000"""Adaptation of parameters.""" from __future__ import annotations import weakref from datetime import date, datetime, time, timedelta from decimal import Decimal from json import dumps as jsonencode from math import isinf, isnan from re import compile as regex from types import MappingProxyType from typing import TYPE_CHECKING, Any, Callable, List, Mapping, Sequence from uuid import UUID from .attrs import AttrDict from .cast import Typecasts from .core import InterfaceError, ProgrammingError from .helpers import quote_if_unqualified if TYPE_CHECKING: from .db import DB __all__ = [ 'Adapter', 'Bytea', 'DbType', 'DbTypes', 'Hstore', 'Literal', 'Json', 'UUID' ] class Bytea(bytes): """Wrapper class for marking Bytea values.""" class Hstore(dict): """Wrapper class for marking hstore values.""" _re_quote = regex('^[Nn][Uu][Ll][Ll]$|[ ,=>]') @classmethod def _quote(cls, s: Any) -> str: if s is None: return 'NULL' if not isinstance(s, str): s = str(s) if not s: return '""' s = s.replace('"', '\\"') if cls._re_quote.search(s): s = f'"{s}"' return s def __str__(self) -> str: """Create a printable representation of the hstore value.""" q = self._quote return ','.join(f'{q(k)}=>{q(v)}' for k, v in self.items()) class Json: """Wrapper class for marking Json values.""" def __init__(self, obj: Any, encode: Callable[[Any], str] | None = None) -> None: """Initialize the JSON object.""" self.obj = obj self.encode = encode or jsonencode def __str__(self) -> str: """Create a printable representation of the JSON object.""" obj = self.obj if isinstance(obj, str): return obj return self.encode(obj) class Literal(str): """Wrapper class for marking literal SQL values.""" class _SimpleTypes(dict): """Dictionary mapping pg_type names to simple type names. The corresponding Python types and simple names are also mapped. """ _type_aliases: Mapping[str, list[str | type]] = MappingProxyType({ 'bool': [bool], 'bytea': [Bytea], 'date': ['interval', 'time', 'timetz', 'timestamp', 'timestamptz', 'abstime', 'reltime', # these are very old 'datetime', 'timedelta', # these do not really exist date, time, datetime, timedelta], 'float': ['float4', 'float8', float], 'int': ['cid', 'int2', 'int4', 'int8', 'oid', 'xid', int], 'hstore': [Hstore], 'json': ['jsonb', Json], 'uuid': [UUID], 'num': ['numeric', Decimal], 'money': [], 'text': ['bpchar', 'char', 'name', 'varchar', bytes, str] }) # noinspection PyMissingConstructor def __init__(self) -> None: """Initialize type mapping.""" for typ, keys in self._type_aliases.items(): keys = [typ, *keys] for key in keys: self[key] = typ if isinstance(key, str): self[f'_{key}'] = f'{typ}[]' elif not isinstance(key, tuple): self[List[key]] = f'{typ}[]' # type: ignore @staticmethod def __missing__(key: str) -> str: """Unmapped types are interpreted as text.""" return 'text' def get_type_dict(self) -> dict[type, str]: """Get a plain dictionary of only the types.""" return {key: typ for key, typ in self.items() if not isinstance(key, (str, tuple))} _simpletypes = _SimpleTypes() _simple_type_dict = _simpletypes.get_type_dict() class _ParameterList(list): """Helper class for building typed parameter lists.""" adapt: Callable def add(self, value: Any, typ:Any = None) -> str: """Typecast value with known database type and build parameter list. If this is a literal value, it will be returned as is. Otherwise, a placeholder will be returned and the parameter list will be augmented. 
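        Example (a sketch; ``adapter`` stands for an Adapter instance
        whose adapt method has been bound to this list)::

            params = _ParameterList()
            params.adapt = adapter.adapt
            clause = 'WHERE id = ' + params.add(42, 'int')
            # clause == 'WHERE id = $1' and params == [42]
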
""" # noinspection PyUnresolvedReferences value = self.adapt(value, typ) if isinstance(value, Literal): return value self.append(value) return f'${len(self)}' class DbType(str): """Class augmenting the simple type name with additional info. The following additional information is provided: oid: the PostgreSQL type OID pgtype: the internal PostgreSQL data type name regtype: the registered PostgreSQL data type name simple: the more coarse-grained PyGreSQL type name typlen: the internal size, negative if variable typtype: b = base type, c = composite type etc. category: A = Array, b = Boolean, C = Composite etc. delim: delimiter for array types relid: corresponding table for composite types attnames: attributes for composite types """ oid: int pgtype: str regtype: str simple: str typlen: int typtype: str category: str delim: str relid: int _get_attnames: Callable[[DbType], AttrDict] @property def attnames(self) -> AttrDict: """Get names and types of the fields of a composite type.""" # noinspection PyUnresolvedReferences return self._get_attnames(self) class DbTypes(dict): """Cache for PostgreSQL data types. This cache maps type OIDs and names to DbType objects containing information on the associated database type. """ _num_types = frozenset('int float num money int2 int4 int8' ' float4 float8 numeric money'.split()) def __init__(self, db: DB) -> None: """Initialize type cache for connection.""" super().__init__() self._db = weakref.proxy(db) self._regtypes = False self._typecasts = Typecasts() self._typecasts.get_attnames = self.get_attnames # type: ignore self._typecasts.connection = self._db.db self._query_pg_type = ( "SELECT oid, typname, oid::pg_catalog.regtype," " typlen, typtype, typcategory, typdelim, typrelid" " FROM pg_catalog.pg_type" " WHERE oid OPERATOR(pg_catalog.=) {}::pg_catalog.regtype") def add(self, oid: int, pgtype: str, regtype: str, typlen: int, typtype: str, category: str, delim: str, relid: int ) -> DbType: """Create a PostgreSQL type name with additional info.""" if oid in self: return self[oid] simple = 'record' if relid else _simpletypes[pgtype] typ = DbType(regtype if self._regtypes else simple) typ.oid = oid typ.simple = simple typ.pgtype = pgtype typ.regtype = regtype typ.typlen = typlen typ.typtype = typtype typ.category = category typ.delim = delim typ.relid = relid typ._get_attnames = self.get_attnames # type: ignore return typ def __missing__(self, key: int | str) -> DbType: """Get the type info from the database if it is not cached.""" try: cmd = self._query_pg_type.format(quote_if_unqualified('$1', key)) res = self._db.query(cmd, (key,)).getresult() except ProgrammingError: res = None if not res: raise KeyError(f'Type {key} could not be found') res = res[0] typ = self.add(*res) self[typ.oid] = self[typ.pgtype] = typ return typ def get(self, key: int | str, # type: ignore default: DbType | None = None) -> DbType | None: """Get the type even if it is not cached.""" try: return self[key] except KeyError: return default def get_attnames(self, typ: Any) -> AttrDict | None: """Get names and types of the fields of a composite type.""" if not isinstance(typ, DbType): typ = self.get(typ) if not typ: return None if not typ.relid: return None return self._db.get_attnames(typ.relid, with_oid=False) def get_typecast(self, typ: Any) -> Callable | None: """Get the typecast function for the given database type.""" return self._typecasts.get(typ) def set_typecast(self, typ: str | Sequence[str], cast: Callable) -> None: """Set a typecast function for the specified database 
type(s).""" self._typecasts.set(typ, cast) def reset_typecast(self, typ: str | Sequence[str] | None = None) -> None: """Reset the typecast function for the specified database type(s).""" self._typecasts.reset(typ) def typecast(self, value: Any, typ: str) -> Any: """Cast the given value according to the given database type.""" if value is None: # for NULL values, no typecast is necessary return None if not isinstance(typ, DbType): db_type = self.get(typ) if db_type: typ = db_type.pgtype cast = self.get_typecast(typ) if typ else None if not cast or cast is str: # no typecast is necessary return value return cast(value) class Adapter: """Class providing methods for adapting parameters to the database.""" _bool_true_values = frozenset('t true 1 y yes on'.split()) _date_literals = frozenset( 'current_date current_time' ' current_timestamp localtime localtimestamp'.split()) _re_array_quote = regex(r'[{},"\\\s]|^[Nn][Uu][Ll][Ll]$') _re_record_quote = regex(r'[(,"\\]') _re_array_escape = _re_record_escape = regex(r'(["\\])') def __init__(self, db: DB): """Initialize the adapter object with the given connection.""" self.db = weakref.proxy(db) @classmethod def _adapt_bool(cls, v: Any) -> str | None: """Adapt a boolean parameter.""" if isinstance(v, str): if not v: return None v = v.lower() in cls._bool_true_values return 't' if v else 'f' @classmethod def _adapt_date(cls, v: Any) -> Any: """Adapt a date parameter.""" if not v: return None if isinstance(v, str) and v.lower() in cls._date_literals: return Literal(v) return v @staticmethod def _adapt_num(v: Any) -> Any: """Adapt a numeric parameter.""" if not v and v != 0: return None return v _adapt_int = _adapt_float = _adapt_money = _adapt_num def _adapt_bytea(self, v: Any) -> str: """Adapt a bytea parameter.""" return self.db.escape_bytea(v) def _adapt_json(self, v: Any) -> str | None: """Adapt a json parameter.""" if not v: return None if isinstance(v, str): return v if isinstance(v, Json): return str(v) return self.db.encode_json(v) def _adapt_hstore(self, v: Any) -> str | None: """Adapt a hstore parameter.""" if not v: return None if isinstance(v, str): return v if isinstance(v, Hstore): return str(v) if isinstance(v, dict): return str(Hstore(v)) raise TypeError(f'Hstore parameter {v} has wrong type') def _adapt_uuid(self, v: Any) -> str | None: """Adapt a UUID parameter.""" if not v: return None if isinstance(v, str): return v return str(v) @classmethod def _adapt_text_array(cls, v: Any) -> str: """Adapt a text type array parameter.""" if isinstance(v, list): adapt = cls._adapt_text_array return '{' + ','.join(adapt(v) for v in v) + '}' if v is None: return 'null' if not v: return '""' v = str(v) if cls._re_array_quote.search(v): v = cls._re_array_escape.sub(r'\\\1', v) v = f'"{v}"' return v _adapt_date_array = _adapt_text_array @classmethod def _adapt_bool_array(cls, v: Any) -> str: """Adapt a boolean array parameter.""" if isinstance(v, list): adapt = cls._adapt_bool_array return '{' + ','.join(adapt(v) for v in v) + '}' if v is None: return 'null' if isinstance(v, str): if not v: return 'null' v = v.lower() in cls._bool_true_values return 't' if v else 'f' @classmethod def _adapt_num_array(cls, v: Any) -> str: """Adapt a numeric array parameter.""" if isinstance(v, list): adapt = cls._adapt_num_array v = '{' + ','.join(adapt(v) for v in v) + '}' if not v and v != 0: return 'null' return str(v) _adapt_int_array = _adapt_float_array = _adapt_money_array = \ _adapt_num_array def _adapt_bytea_array(self, v: Any) -> bytes: """Adapt a bytea array 
parameter.""" if isinstance(v, list): return b'{' + b','.join( self._adapt_bytea_array(v) for v in v) + b'}' if v is None: return b'null' return self.db.escape_bytea(v).replace(b'\\', b'\\\\') def _adapt_json_array(self, v: Any) -> str: """Adapt a json array parameter.""" if isinstance(v, list): adapt = self._adapt_json_array return '{' + ','.join(adapt(v) for v in v) + '}' if not v: return 'null' if not isinstance(v, str): v = self.db.encode_json(v) if self._re_array_quote.search(v): v = self._re_array_escape.sub(r'\\\1', v) v = f'"{v}"' return v def _adapt_record(self, v: Any, typ: Any) -> str: """Adapt a record parameter with given type.""" typ = self.get_attnames(typ).values() if len(typ) != len(v): raise TypeError(f'Record parameter {v} has wrong size') adapt = self.adapt value = [] for v, t in zip(v, typ): # noqa: B020 v = adapt(v, t) if v is None: v = '' else: if isinstance(v, bytes): v = v.decode('ascii') elif not isinstance(v, str): v = str(v) if v: if self._re_record_quote.search(v): v = self._re_record_escape.sub(r'\\\1', v) v = f'"{v}"' else: v = '""' value.append(v) v = ','.join(value) return f'({v})' def adapt(self, value: Any, typ: Any = None) -> str: """Adapt a value with known database type.""" if value is not None and not isinstance(value, Literal): if typ: simple = self.get_simple_name(typ) else: typ = simple = self.guess_simple_type(value) or 'text' pg_str = getattr(value, '__pg_str__', None) if pg_str: value = pg_str(typ) if simple == 'text': pass elif simple == 'record': if isinstance(value, tuple): value = self._adapt_record(value, typ) elif simple.endswith('[]'): if isinstance(value, list): adapt = getattr(self, f'_adapt_{simple[:-2]}_array') value = adapt(value) else: adapt = getattr(self, f'_adapt_{simple}') value = adapt(value) return value @staticmethod def simple_type(name: str) -> DbType: """Create a simple database type with given attribute names.""" typ = DbType(name) typ.simple = name return typ @staticmethod def get_simple_name(typ: Any) -> str: """Get the simple name of a database type.""" if isinstance(typ, DbType): # noinspection PyUnresolvedReferences return typ.simple return _simpletypes[typ] @staticmethod def get_attnames(typ: Any) -> dict[str, dict[str, str]]: """Get the attribute names of a composite database type.""" if isinstance(typ, DbType): return typ.attnames return {} @classmethod def guess_simple_type(cls, value: Any) -> str | None: """Try to guess which database type the given value has.""" # optimize for most frequent types try: return _simple_type_dict[type(value)] except KeyError: pass if isinstance(value, (bytes, str)): return 'text' if isinstance(value, bool): return 'bool' if isinstance(value, int): return 'int' if isinstance(value, float): return 'float' if isinstance(value, Decimal): return 'num' if isinstance(value, (date, time, datetime, timedelta)): return 'date' if isinstance(value, Bytea): return 'bytea' if isinstance(value, Json): return 'json' if isinstance(value, Hstore): return 'hstore' if isinstance(value, UUID): return 'uuid' if isinstance(value, list): return (cls.guess_simple_base_type(value) or 'text') + '[]' if isinstance(value, tuple): simple_type = cls.simple_type guess = cls.guess_simple_type # noinspection PyUnusedLocal def get_attnames(self: DbType) -> AttrDict: return AttrDict((str(n + 1), simple_type(guess(v) or 'text')) for n, v in enumerate(value)) typ = simple_type('record') typ._get_attnames = get_attnames return typ return None @classmethod def guess_simple_base_type(cls, value: Any) -> str | None: """Try 
to guess the base type of a given array.""" for v in value: if isinstance(v, list): typ = cls.guess_simple_base_type(v) else: typ = cls.guess_simple_type(v) if typ: return typ return None def adapt_inline(self, value: Any, nested: bool=False) -> Any: """Adapt a value that is put into the SQL and needs to be quoted.""" if value is None: return 'NULL' if isinstance(value, Literal): return value if isinstance(value, Bytea): value = self.db.escape_bytea(value).decode('ascii') elif isinstance(value, (datetime, date, time, timedelta)): value = str(value) if isinstance(value, (bytes, str)): value = self.db.escape_string(value) return f"'{value}'" if isinstance(value, bool): return 'true' if value else 'false' if isinstance(value, float): if isinf(value): return "'-Infinity'" if value < 0 else "'Infinity'" if isnan(value): return "'NaN'" return value if isinstance(value, (int, Decimal)): return value if isinstance(value, list): q = self.adapt_inline s = '[{}]' if nested else 'ARRAY[{}]' return s.format(','.join(str(q(v, nested=True)) for v in value)) if isinstance(value, tuple): q = self.adapt_inline return '({})'.format(','.join(str(q(v)) for v in value)) if isinstance(value, Json): value = self.db.escape_string(str(value)) return f"'{value}'::json" if isinstance(value, Hstore): value = self.db.escape_string(str(value)) return f"'{value}'::hstore" pg_repr = getattr(value, '__pg_repr__', None) if not pg_repr: raise InterfaceError( f'Do not know how to adapt type {type(value)}') value = pg_repr() if isinstance(value, (tuple, list)): value = self.adapt_inline(value) return value def parameter_list(self) -> _ParameterList: """Return a parameter list for parameters with known database types. The list has an add(value, typ) method that will build up the list and return either the literal value or a placeholder. """ params = _ParameterList() params.adapt = self.adapt return params def format_query(self, command: str, values: list | tuple | dict | None = None, types: list | tuple | dict | None = None, inline: bool=False ) -> tuple[str, _ParameterList]: """Format a database query using the given values and types. The optional types describe the values and must be passed as a list, tuple or string (that will be split on whitespace) when values are passed as a list or tuple, or as a dict if values are passed as a dict. If inline is set to True, then parameters will be passed inline together with the query string. 
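        Example (a sketch with illustrative SQL)::

            format_query('select %s, %s', (42, 'x'))
            # -> ('select $1, $2', [42, 'x'])
            format_query('select %s, %s', (42, 'x'), inline=True)
            # -> ("select 42, 'x'", [])
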
""" params = self.parameter_list() if not values: return command, params if inline and types: raise ValueError('Typed parameters must be sent separately') if isinstance(values, (list, tuple)): if inline: adapt = self.adapt_inline seq_literals = [adapt(value) for value in values] else: add = params.add if types: if isinstance(types, str): types = types.split() if (not isinstance(types, (list, tuple)) or len(types) != len(values)): raise TypeError('The values and types do not match') seq_literals = [add(value, typ) for value, typ in zip(values, types)] else: seq_literals = [add(value) for value in values] command %= tuple(seq_literals) elif isinstance(values, dict): # we want to allow extra keys in the dictionary, # so we first must find the values actually used in the command used_values = {} map_literals = dict.fromkeys(values, '') for key in values: del map_literals[key] try: command % map_literals except KeyError: used_values[key] = values[key] # pyright: ignore map_literals[key] = '' if inline: adapt = self.adapt_inline map_literals = {key: adapt(value) for key, value in used_values.items()} else: add = params.add if types: if not isinstance(types, dict): raise TypeError('The values and types do not match') map_literals = {key: add(used_values[key], types.get(key)) for key in sorted(used_values)} else: map_literals = {key: add(used_values[key]) for key in sorted(used_values)} command %= map_literals else: raise TypeError('The values must be passed as tuple, list or dict') return command, params PyGreSQL-PyGreSQL-166b135/pg/attrs.py000066400000000000000000000021061450706350600171170ustar00rootroot00000000000000"""Helpers for memorizing attributes.""" from typing import Any __all__ = ['AttrDict'] class AttrDict(dict): """Simple read-only ordered dictionary for storing attribute names.""" def __init__(self, *args: Any, **kw: Any) -> None: """Initialize the dictionary.""" self._read_only = False super().__init__(*args, **kw) self._read_only = True error = self._read_only_error self.clear = self.update = error # type: ignore self.pop = self.setdefault = self.popitem = error # type: ignore def __setitem__(self, key: str, value: Any) -> None: """Set a value.""" if self._read_only: self._read_only_error() super().__setitem__(key, value) def __delitem__(self, key: str) -> None: """Delete a value.""" if self._read_only: self._read_only_error() super().__delitem__(key) @staticmethod def _read_only_error(*_args: Any, **_kw: Any) -> Any: """Raise error for write operations.""" raise TypeError('This object is read-only') PyGreSQL-PyGreSQL-166b135/pg/cast.py000066400000000000000000000354351450706350600167270ustar00rootroot00000000000000"""Typecasting mechanisms.""" from __future__ import annotations from collections import namedtuple from datetime import date, datetime, timedelta from functools import partial from inspect import signature from re import compile as regex from typing import Any, Callable, ClassVar, Sequence from uuid import UUID from .attrs import AttrDict from .core import ( Connection, cast_array, cast_hstore, cast_record, get_bool, get_decimal, get_decimal_point, get_jsondecode, unescape_bytea, ) from .tz import timezone_as_offset __all__ = [ 'cast_bool', 'cast_json', 'cast_num', 'cast_money', 'cast_int2vector', 'cast_date', 'cast_time', 'cast_timetz', 'cast_interval', 'cast_timestamp','cast_timestamptz', 'Typecasts', 'get_typecast', 'set_typecast' ] def get_args(func: Callable) -> list: """Get the arguments of a function.""" return list(signature(func).parameters) def 
cast_bool(value: str) -> Any: """Cast a boolean value.""" if not get_bool(): return value return value[0] == 't' def cast_json(value: str) -> Any: """Cast a JSON value.""" cast = get_jsondecode() if not cast: return value return cast(value) def cast_num(value: str) -> Any: """Cast a numeric value.""" return (get_decimal() or float)(value) def cast_money(value: str) -> Any: """Cast a money value.""" point = get_decimal_point() if not point: return value if point != '.': value = value.replace(point, '.') value = value.replace('(', '-') value = ''.join(c for c in value if c.isdigit() or c in '.-') return (get_decimal() or float)(value) def cast_int2vector(value: str) -> list[int]: """Cast an int2vector value.""" return [int(v) for v in value.split()] def cast_date(value: str, connection: Connection) -> Any: """Cast a date value.""" # The output format depends on the server setting DateStyle. The default # setting ISO and the setting for German are actually unambiguous. The # order of days and months in the other two settings is however ambiguous, # so at least here we need to consult the setting to properly parse values. if value == '-infinity': return date.min if value == 'infinity': return date.max values = value.split() if values[-1] == 'BC': return date.min value = values[0] if len(value) > 10: return date.max format = connection.date_format() return datetime.strptime(value, format).date() def cast_time(value: str) -> Any: """Cast a time value.""" format = '%H:%M:%S.%f' if len(value) > 8 else '%H:%M:%S' return datetime.strptime(value, format).time() _re_timezone = regex('(.*)([+-].*)') def cast_timetz(value: str) -> Any: """Cast a timetz value.""" m = _re_timezone.match(value) if m: value, tz = m.groups() else: tz = '+0000' format = '%H:%M:%S.%f' if len(value) > 8 else '%H:%M:%S' value += timezone_as_offset(tz) format += '%z' return datetime.strptime(value, format).timetz() def cast_timestamp(value: str, connection: Connection) -> Any: """Cast a timestamp value.""" if value == '-infinity': return datetime.min if value == 'infinity': return datetime.max values = value.split() if values[-1] == 'BC': return datetime.min format = connection.date_format() if format.endswith('-%Y') and len(values) > 2: values = values[1:5] if len(values[3]) > 4: return datetime.max formats = ['%d %b' if format.startswith('%d') else '%b %d', '%H:%M:%S.%f' if len(values[2]) > 8 else '%H:%M:%S', '%Y'] else: if len(values[0]) > 10: return datetime.max formats = [format, '%H:%M:%S.%f' if len(values[1]) > 8 else '%H:%M:%S'] return datetime.strptime(' '.join(values), ' '.join(formats)) def cast_timestamptz(value: str, connection: Connection) -> Any: """Cast a timestamptz value.""" if value == '-infinity': return datetime.min if value == 'infinity': return datetime.max values = value.split() if values[-1] == 'BC': return datetime.min format = connection.date_format() if format.endswith('-%Y') and len(values) > 2: values = values[1:] if len(values[3]) > 4: return datetime.max formats = ['%d %b' if format.startswith('%d') else '%b %d', '%H:%M:%S.%f' if len(values[2]) > 8 else '%H:%M:%S', '%Y'] values, tz = values[:-1], values[-1] else: if format.startswith('%Y-'): m = _re_timezone.match(values[1]) if m: values[1], tz = m.groups() else: tz = '+0000' else: values, tz = values[:-1], values[-1] if len(values[0]) > 10: return datetime.max formats = [format, '%H:%M:%S.%f' if len(values[1]) > 8 else '%H:%M:%S'] values.append(timezone_as_offset(tz)) formats.append('%z') return datetime.strptime(' '.join(values), ' 
'.join(formats)) _re_interval_sql_standard = regex( '(?:([+-])?([0-9]+)-([0-9]+) ?)?' '(?:([+-]?[0-9]+)(?!:) ?)?' '(?:([+-])?([0-9]+):([0-9]+):([0-9]+)(?:\\.([0-9]+))?)?') _re_interval_postgres = regex( '(?:([+-]?[0-9]+) ?years? ?)?' '(?:([+-]?[0-9]+) ?mons? ?)?' '(?:([+-]?[0-9]+) ?days? ?)?' '(?:([+-])?([0-9]+):([0-9]+):([0-9]+)(?:\\.([0-9]+))?)?') _re_interval_postgres_verbose = regex( '@ ?(?:([+-]?[0-9]+) ?years? ?)?' '(?:([+-]?[0-9]+) ?mons? ?)?' '(?:([+-]?[0-9]+) ?days? ?)?' '(?:([+-]?[0-9]+) ?hours? ?)?' '(?:([+-]?[0-9]+) ?mins? ?)?' '(?:([+-])?([0-9]+)(?:\\.([0-9]+))? ?secs?)? ?(ago)?') _re_interval_iso_8601 = regex( 'P(?:([+-]?[0-9]+)Y)?' '(?:([+-]?[0-9]+)M)?' '(?:([+-]?[0-9]+)D)?' '(?:T(?:([+-]?[0-9]+)H)?' '(?:([+-]?[0-9]+)M)?' '(?:([+-])?([0-9]+)(?:\\.([0-9]+))?S)?)?') def cast_interval(value: str) -> timedelta: """Cast an interval value.""" # The output format depends on the server setting IntervalStyle, but it's # not necessary to consult this setting to parse it. It's faster to just # check all possible formats, and there is no ambiguity here. m = _re_interval_iso_8601.match(value) if m: s = [v or '0' for v in m.groups()] secs_ago = s.pop(5) == '-' d = [int(v) for v in s] years, mons, days, hours, mins, secs, usecs = d if secs_ago: secs = -secs usecs = -usecs else: m = _re_interval_postgres_verbose.match(value) if m: s, ago = [v or '0' for v in m.groups()[:8]], m.group(9) secs_ago = s.pop(5) == '-' d = [-int(v) for v in s] if ago else [int(v) for v in s] years, mons, days, hours, mins, secs, usecs = d if secs_ago: secs = - secs usecs = -usecs else: m = _re_interval_postgres.match(value) if m and any(m.groups()): s = [v or '0' for v in m.groups()] hours_ago = s.pop(3) == '-' d = [int(v) for v in s] years, mons, days, hours, mins, secs, usecs = d if hours_ago: hours = -hours mins = -mins secs = -secs usecs = -usecs else: m = _re_interval_sql_standard.match(value) if m and any(m.groups()): s = [v or '0' for v in m.groups()] years_ago = s.pop(0) == '-' hours_ago = s.pop(3) == '-' d = [int(v) for v in s] years, mons, days, hours, mins, secs, usecs = d if years_ago: years = -years mons = -mons if hours_ago: hours = -hours mins = -mins secs = -secs usecs = -usecs else: raise ValueError(f'Cannot parse interval: {value}') days += 365 * years + 30 * mons return timedelta(days=days, hours=hours, minutes=mins, seconds=secs, microseconds=usecs) class Typecasts(dict): """Dictionary mapping database types to typecast functions. The cast functions get passed the string representation of a value in the database which they need to convert to a Python object. The passed string will never be None since NULL values are already handled before the cast function is called. Note that the basic types are already handled by the C extension. They only need to be handled here as record or array components. 
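    Example (a sketch with a made-up cast function)::

        casts = Typecasts()
        casts.set('circle', lambda v: v.strip('<>'))
        cast = casts.get('circle')  # will be used for 'circle' values
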
""" # the default cast functions # (str functions are ignored but have been added for faster access) defaults: ClassVar[dict[str, Callable]] = { 'char': str, 'bpchar': str, 'name': str, 'text': str, 'varchar': str, 'sql_identifier': str, 'bool': cast_bool, 'bytea': unescape_bytea, 'int2': int, 'int4': int, 'serial': int, 'int8': int, 'oid': int, 'hstore': cast_hstore, 'json': cast_json, 'jsonb': cast_json, 'float4': float, 'float8': float, 'numeric': cast_num, 'money': cast_money, 'date': cast_date, 'interval': cast_interval, 'time': cast_time, 'timetz': cast_timetz, 'timestamp': cast_timestamp, 'timestamptz': cast_timestamptz, 'int2vector': cast_int2vector, 'uuid': UUID, 'anyarray': cast_array, 'record': cast_record} # pyright: ignore connection: Connection | None = None # set in connection specific instance def __missing__(self, typ: str) -> Callable | None: """Create a cast function if it is not cached. Note that this class never raises a KeyError, but returns None when no special cast function exists. """ if not isinstance(typ, str): raise TypeError(f'Invalid type: {typ}') cast: Callable | None = self.defaults.get(typ) if cast: # store default for faster access cast = self._add_connection(cast) self[typ] = cast elif typ.startswith('_'): base_cast = self[typ[1:]] cast = self.create_array_cast(base_cast) if base_cast: self[typ] = cast else: attnames = self.get_attnames(typ) if attnames: casts = [self[v.pgtype] for v in attnames.values()] cast = self.create_record_cast(typ, attnames, casts) self[typ] = cast return cast @staticmethod def _needs_connection(func: Callable) -> bool: """Check if a typecast function needs a connection argument.""" try: args = get_args(func) except (TypeError, ValueError): return False return 'connection' in args[1:] def _add_connection(self, cast: Callable) -> Callable: """Add a connection argument to the typecast function if necessary.""" if not self.connection or not self._needs_connection(cast): return cast return partial(cast, connection=self.connection) def get(self, typ: str, default: Callable | None = None # type: ignore ) -> Callable | None: """Get the typecast function for the given database type.""" return self[typ] or default def set(self, typ: str | Sequence[str], cast: Callable | None) -> None: """Set a typecast function for the specified database type(s).""" if isinstance(typ, str): typ = [typ] if cast is None: for t in typ: self.pop(t, None) self.pop(f'_{t}', None) else: if not callable(cast): raise TypeError("Cast parameter must be callable") for t in typ: self[t] = self._add_connection(cast) self.pop(f'_{t}', None) def reset(self, typ: str | Sequence[str] | None = None) -> None: """Reset the typecasts for the specified type(s) to their defaults. When no type is specified, all typecasts will be reset. 
""" if typ is None: self.clear() else: if isinstance(typ, str): typ = [typ] for t in typ: self.pop(t, None) @classmethod def get_default(cls, typ: str) -> Any: """Get the default typecast function for the given database type.""" return cls.defaults.get(typ) @classmethod def set_default(cls, typ: str | Sequence[str], cast: Callable | None) -> None: """Set a default typecast function for the given database type(s).""" if isinstance(typ, str): typ = [typ] defaults = cls.defaults if cast is None: for t in typ: defaults.pop(t, None) defaults.pop(f'_{t}', None) else: if not callable(cast): raise TypeError("Cast parameter must be callable") for t in typ: defaults[t] = cast defaults.pop(f'_{t}', None) # noinspection PyMethodMayBeStatic,PyUnusedLocal def get_attnames(self, typ: Any) -> AttrDict: """Return the fields for the given record type. This method will be replaced with the get_attnames() method of DbTypes. """ return AttrDict() # noinspection PyMethodMayBeStatic def dateformat(self) -> str: """Return the current date format. This method will be replaced with the dateformat() method of DbTypes. """ return '%Y-%m-%d' def create_array_cast(self, basecast: Callable) -> Callable: """Create an array typecast for the given base cast.""" cast_array = self['anyarray'] def cast(v: Any) -> list: return cast_array(v, basecast) return cast def create_record_cast(self, name: str, fields: AttrDict, casts: list[Callable]) -> Callable: """Create a named record typecast for the given fields and casts.""" cast_record = self['record'] record = namedtuple(name, fields) # type: ignore def cast(v: Any) -> record: # noinspection PyArgumentList return record(*cast_record(v, casts)) return cast def get_typecast(typ: str) -> Callable | None: """Get the global typecast function for the given database type.""" return Typecasts.get_default(typ) def set_typecast(typ: str | Sequence[str], cast: Callable | None) -> None: """Set a global typecast function for the given database type(s). Note that connections cache cast functions. To be sure a global change is picked up by a running connection, call db.db_types.reset_typecast(). """ Typecasts.set_default(typ, cast) PyGreSQL-PyGreSQL-166b135/pg/core.py000066400000000000000000000071401450706350600167150ustar00rootroot00000000000000"""Core functionality from extension module.""" try: from ._pg import version except ImportError as e: # noqa: F841 import os libpq = 'libpq.' 
if os.name == 'nt': libpq += 'dll' import sys paths = [path for path in os.environ["PATH"].split(os.pathsep) if os.path.exists(os.path.join(path, libpq))] if sys.version_info >= (3, 8): # see https://docs.python.org/3/whatsnew/3.8.html#ctypes add_dll_dir = os.add_dll_directory # type: ignore for path in paths: with add_dll_dir(os.path.abspath(path)): try: from ._pg import version except ImportError: pass else: del version e = None # type: ignore break if paths: libpq = 'compatible ' + libpq else: libpq += 'so' if e: raise ImportError( "Cannot import shared library for PyGreSQL,\n" f"probably because no {libpq} is installed.\n{e}") from e else: del version # import objects from extension module from ._pg import ( INV_READ, INV_WRITE, POLLING_FAILED, POLLING_OK, POLLING_READING, POLLING_WRITING, RESULT_DDL, RESULT_DML, RESULT_DQL, RESULT_EMPTY, SEEK_CUR, SEEK_END, SEEK_SET, TRANS_ACTIVE, TRANS_IDLE, TRANS_INERROR, TRANS_INTRANS, TRANS_UNKNOWN, Connection, DatabaseError, DataError, Error, IntegrityError, InterfaceError, InternalError, InvalidResultError, LargeObject, MultipleResultsError, NoResultError, NotSupportedError, OperationalError, ProgrammingError, Query, Warning, cast_array, cast_hstore, cast_record, connect, escape_bytea, escape_string, get_array, get_bool, get_bytea_escaped, get_datestyle, get_decimal, get_decimal_point, get_defbase, get_defhost, get_defopt, get_defport, get_defuser, get_jsondecode, get_pqlib_version, set_array, set_bool, set_bytea_escaped, set_datestyle, set_decimal, set_decimal_point, set_defbase, set_defhost, set_defopt, set_defpasswd, set_defport, set_defuser, set_jsondecode, set_query_helpers, unescape_bytea, version, ) __all__ = [ 'Error', 'Warning', 'DataError', 'DatabaseError', 'IntegrityError', 'InterfaceError', 'InternalError', 'InvalidResultError', 'MultipleResultsError', 'NoResultError', 'NotSupportedError', 'OperationalError', 'ProgrammingError', 'Connection', 'Query', 'LargeObject', 'INV_READ', 'INV_WRITE', 'POLLING_OK', 'POLLING_FAILED', 'POLLING_READING', 'POLLING_WRITING', 'RESULT_DDL', 'RESULT_DML', 'RESULT_DQL', 'RESULT_EMPTY', 'SEEK_CUR', 'SEEK_END', 'SEEK_SET', 'TRANS_ACTIVE', 'TRANS_IDLE', 'TRANS_INERROR', 'TRANS_INTRANS', 'TRANS_UNKNOWN', 'cast_array', 'cast_hstore', 'cast_record', 'connect', 'escape_bytea', 'escape_string', 'unescape_bytea', 'get_array', 'get_bool', 'get_bytea_escaped', 'get_datestyle', 'get_decimal', 'get_decimal_point', 'get_defbase', 'get_defhost', 'get_defopt', 'get_defport', 'get_defuser', 'get_jsondecode', 'get_pqlib_version', 'set_array', 'set_bool', 'set_bytea_escaped', 'set_datestyle', 'set_decimal', 'set_decimal_point', 'set_defbase', 'set_defhost', 'set_defopt', 'set_defpasswd', 'set_defport', 'set_defuser', 'set_jsondecode', 'set_query_helpers', 'version', ] PyGreSQL-PyGreSQL-166b135/pg/db.py000066400000000000000000001724511450706350600163620ustar00rootroot00000000000000"""Connection wrapper.""" from __future__ import annotations from contextlib import suppress from json import dumps as jsonencode from json import loads as jsondecode from operator import itemgetter from typing import ( TYPE_CHECKING, Any, Callable, Iterator, Sequence, TypeVar, overload, ) from . 
import Connection, connect from .adapt import Adapter, DbTypes from .attrs import AttrDict from .core import ( InternalError, LargeObject, ProgrammingError, Query, get_bool, get_jsondecode, unescape_bytea, ) from .error import db_error, int_error, prg_error from .helpers import namediter, oid_key, quote_if_unqualified from .notify import NotificationHandler if TYPE_CHECKING: from pgdb.connection import Connection as DbApi2Connection try: AnyStr = TypeVar('AnyStr', str, bytes, str | bytes) except TypeError: # Python < 3.10 AnyStr = Any # type: ignore __all__ = ['DB'] # The actual PostgreSQL database connection interface: class DB: """Wrapper class for the core connection type.""" dbname: str host: str port: int options: str error: str status: int user : str protocol_version: int server_version: int socket: int backend_pid: int ssl_in_use: bool ssl_attributes: dict[str, str | None] db: Connection | None = None # invalid fallback for underlying connection _db_args: Any # either the connect args or the underlying connection @overload def __init__(self, dbname: str | None = None, host: str | None = None, port: int = -1, opt: str | None = None, user: str | None = None, passwd: str | None = None, nowait: bool = False) -> None: ... # create a new connection using the specified parameters @overload def __init__(self, db: Connection | DB | DbApi2Connection) -> None: ... # create a connection wrapper based on an existing connection def __init__(self, *args: Any, **kw: Any) -> None: """Create a new connection. You can pass either the connection parameters or an existing pg or pgdb Connection. This allows you to use the methods of the classic pg interface with a DB-API 2 pgdb Connection. """ if kw: db = kw.get('db') if db is not None and (args or len(kw) > 1): raise TypeError("Conflicting connection parameters") elif len(args) == 1 and not isinstance(args[0], str): db = args[0] else: db = None if db: if isinstance(db, DB): db = db.db # allow db to be a wrapped Connection else: with suppress(AttributeError): db = db._cnx # allow db to be a pgdb Connection if not isinstance(db, Connection): raise TypeError( "The 'db' argument must be a valid database connection.") self._db_args = db self._closeable = False else: db = connect(*args, **kw) self._db_args = args, kw self._closeable = True self.db = db self.dbname = db.db self._regtypes = False self._attnames: dict[str, AttrDict] = {} self._generated: dict[str, frozenset[str]] = {} self._pkeys: dict[str, str | tuple[str, ...]] = {} self._privileges: dict[tuple[str, str], bool] = {} self.adapter = Adapter(self) self.dbtypes = DbTypes(self) self._query_attnames = ( "SELECT a.attname," " t.oid, t.typname, t.oid::pg_catalog.regtype," " t.typlen, t.typtype, t.typcategory, t.typdelim, t.typrelid" " FROM pg_catalog.pg_attribute a" " JOIN pg_catalog.pg_type t" " ON t.oid OPERATOR(pg_catalog.=) a.atttypid" " WHERE a.attrelid OPERATOR(pg_catalog.=)" " {}::pg_catalog.regclass" " AND {} AND NOT a.attisdropped ORDER BY a.attnum") if db.server_version < 120000: self._query_generated = ( "a.attidentity OPERATOR(pg_catalog.=) 'a'" ) else: self._query_generated = ( "(a.attidentity OPERATOR(pg_catalog.=) 'a' OR" " a.attgenerated OPERATOR(pg_catalog.!=) '')" ) db.set_cast_hook(self.dbtypes.typecast) # For debugging scripts, self.debug can be set # * to a string format specification (e.g. in CGI set to "%s
"), # * to a file object to write debug statements or # * to a callable object which takes a string argument # * to any other true value to just print debug statements self.debug: Any = None def __getattr__(self, name: str) -> Any: """Get the specified attribute of the connection.""" # All undefined members are same as in underlying connection: if self.db: return getattr(self.db, name) else: raise int_error('Connection is not valid') def __dir__(self) -> list[str]: """List all attributes of the connection.""" # Custom dir function including the attributes of the connection: attrs = set(self.__class__.__dict__) attrs.update(self.__dict__) attrs.update(dir(self.db)) return sorted(attrs) # Context manager methods def __enter__(self) -> DB: """Enter the runtime context. This will start a transaction.""" self.begin() return self def __exit__(self, et: type[BaseException] | None, ev: BaseException | None, tb: Any) -> None: """Exit the runtime context. This will end the transaction.""" if et is None and ev is None and tb is None: self.commit() else: self.rollback() def __del__(self) -> None: """Delete the connection.""" try: db = self.db except AttributeError: db = None if db: with suppress(TypeError): # when already closed db.set_cast_hook(None) if self._closeable: with suppress(InternalError): # when already closed db.close() # Auxiliary methods def _do_debug(self, *args: Any) -> None: """Print a debug message.""" if self.debug: s = '\n'.join(str(arg) for arg in args) if isinstance(self.debug, str): print(self.debug % s) elif hasattr(self.debug, 'write'): # noinspection PyCallingNonCallable self.debug.write(s + '\n') elif callable(self.debug): self.debug(s) else: print(s) def _escape_qualified_name(self, s: str) -> str: """Escape a qualified name. Escapes the name for use as an SQL identifier, unless the name contains a dot, in which case the name is ambiguous (could be a qualified name or just a name with a dot in it) and must be quoted manually by the caller. """ if '.' not in s: s = self.escape_identifier(s) return s @staticmethod def _make_bool(d: Any) -> bool | str: """Get boolean value corresponding to d.""" return bool(d) if get_bool() else ('t' if d else 'f') @staticmethod def _list_params(params: Sequence) -> str: """Create a human readable parameter list.""" return ', '.join(f'${n}={v!r}' for n, v in enumerate(params, 1)) @property def _valid_db(self) -> Connection: """Get underlying connection and make sure it is not closed.""" db = self.db if not db: raise int_error('Connection already closed') return db # Public methods # escape_string and escape_bytea exist as methods, # so we define unescape_bytea as a method as well unescape_bytea = staticmethod(unescape_bytea) @staticmethod def decode_json(s: str) -> Any: """Decode a JSON string coming from the database.""" return (get_jsondecode() or jsondecode)(s) @staticmethod def encode_json(d: Any) -> str: """Encode a JSON string for use within SQL.""" return jsonencode(d) def close(self) -> None: """Close the database connection.""" # Wraps shared library function so we can track state. db = self._valid_db with suppress(TypeError): # when already closed db.set_cast_hook(None) if self._closeable: db.close() self.db = None def reset(self) -> None: """Reset connection with current parameters. All derived queries and large objects derived from this connection will not be usable after this call. """ self._valid_db.reset() def reopen(self) -> None: """Reopen connection to the database. 
Used in case we need another connection to the same database. Note that we can still reopen a database that we have closed. """ # There is no such shared library function. if self._closeable: args, kw = self._db_args db = connect(*args, **kw) if self.db: self.db.set_cast_hook(None) self.db.close() db.set_cast_hook(self.dbtypes.typecast) self.db = db else: self.db = self._db_args def begin(self, mode: str | None = None) -> Query: """Begin a transaction.""" qstr = 'BEGIN' if mode: qstr += ' ' + mode return self.query(qstr) start = begin def commit(self) -> Query: """Commit the current transaction.""" return self.query('COMMIT') end = commit def rollback(self, name: str | None = None) -> Query: """Roll back the current transaction.""" qstr = 'ROLLBACK' if name: qstr += ' TO ' + name return self.query(qstr) abort = rollback def savepoint(self, name: str) -> Query: """Define a new savepoint within the current transaction.""" return self.query('SAVEPOINT ' + name) def release(self, name: str) -> Query: """Destroy a previously defined savepoint.""" return self.query('RELEASE ' + name) def get_parameter(self, parameter: str | list[str] | tuple[str, ...] | set[str] | frozenset[str] | dict[str, Any] ) -> str | list[str] | dict[str, str]: """Get the value of a run-time parameter. If the parameter is a string, the return value will also be a string that is the current setting of the run-time parameter with that name. You can get several parameters at once by passing a list, set or dict. When passing a list of parameter names, the return value will be a corresponding list of parameter settings. When passing a set of parameter names, a new dict will be returned, mapping these parameter names to their settings. Finally, if you pass a dict as parameter, its values will be set to the current parameter settings corresponding to its keys. By passing the special name 'all' as the parameter, you can get a dict of all existing configuration parameters. """ values: Any if isinstance(parameter, str): parameter = [parameter] values = None elif isinstance(parameter, (list, tuple)): values = [] elif isinstance(parameter, (set, frozenset)): values = {} elif isinstance(parameter, dict): values = parameter else: raise TypeError( 'The parameter must be a string, list, set or dict') if not parameter: raise TypeError('No parameter has been specified') query = self._valid_db.query params: Any = {} if isinstance(values, dict) else [] for param_key in parameter: param = param_key.strip().lower() if isinstance( param_key, (bytes, str)) else None if not param: raise TypeError('Invalid parameter') if param == 'all': cmd = 'SHOW ALL' values = query(cmd).getresult() values = {value[0]: value[1] for value in values} break if isinstance(params, dict): params[param] = param_key else: params.append(param) else: for param in params: cmd = f'SHOW {param}' value = query(cmd).singlescalar() if values is None: values = value elif isinstance(values, list): values.append(value) else: values[params[param]] = value return values def set_parameter(self, parameter: str | list[str] | tuple[str, ...] | set[str] | frozenset[str] | dict[str, Any], value: str | list[str] | tuple[str, ...] | set[str] | frozenset[str]| None = None, local: bool = False) -> None: """Set the value of a run-time parameter. If the parameter and the value are strings, the run-time parameter will be set to that value. If no value or None is passed as a value, then the run-time parameter will be restored to its default value. 
You can set several parameters at once by passing a list of parameter names, together with a single value that all parameters should be set to or with a corresponding list of values. You can also pass the parameters as a set if you only provide a single value. Finally, you can pass a dict with parameter names as keys. In this case, you should not pass a value, since the values for the parameters will be taken from the dict. By passing the special name 'all' as the parameter, you can reset all existing settable run-time parameters to their default values. If you set local to True, then the command takes effect for only the current transaction. After commit() or rollback(), the session-level setting takes effect again. Setting local to True will appear to have no effect if it is executed outside a transaction, since the transaction will end immediately. """ if isinstance(parameter, str): parameter = {parameter: value} elif isinstance(parameter, (list, tuple)): if isinstance(value, (list, tuple)): parameter = dict(zip(parameter, value)) else: parameter = dict.fromkeys(parameter, value) elif isinstance(parameter, (set, frozenset)): if isinstance(value, (list, tuple, set, frozenset)): value = set(value) if len(value) == 1: value = next(iter(value)) if not (value is None or isinstance(value, str)): raise ValueError( 'A single value must be specified' ' when parameter is a set') parameter = dict.fromkeys(parameter, value) elif isinstance(parameter, dict): if value is not None: raise ValueError( 'A value must not be specified' ' when parameter is a dictionary') else: raise TypeError( 'The parameter must be a string, list, set or dict') if not parameter: raise TypeError('No parameter has been specified') params: dict[str, str | None] = {} for param, param_value in parameter.items(): param = param.strip().lower() if isinstance(param, str) else None if not param: raise TypeError('Invalid parameter') if param == 'all': if param_value is not None: raise ValueError( 'A value must not be specified' " when parameter is 'all'") params = {'all': None} break params[param] = param_value local_clause = ' LOCAL' if local else '' for param, param_value in params.items(): cmd = (f'RESET{local_clause} {param}' if param_value is None else f'SET{local_clause} {param} TO {param_value}') self._do_debug(cmd) self._valid_db.query(cmd) def query(self, command: str, *args: Any) -> Query: """Execute a SQL command string. This method simply sends a SQL query to the database. If the query is an insert statement that inserted exactly one row into a table that has OIDs, the return value is the OID of the newly inserted row. If the query is an update or delete statement, or an insert statement that did not insert exactly one row in a table with OIDs, then the number of rows affected is returned as a string. If it is a statement that returns rows as a result (usually a select statement, but maybe also an "insert/update ... returning" statement), this method returns a Query object that can be accessed via getresult() or dictresult() or simply printed. Otherwise, it returns `None`. The query can contain numbered parameters of the form $1 in place of any data constant. Arguments given after the query string will be substituted for the corresponding numbered parameter. Parameter values can also be given as a single list or tuple argument. """ # Wraps shared library function for debugging. 
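        # Illustrative sketch only (the table "test" is hypothetical):
        #     db.query("SELECT d FROM test WHERE x = $1 AND y = $2", 10, 20)
        # sends the values separately from the command string as $1 and $2.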
        db = self._valid_db
        if args:
            self._do_debug(command, args)
            return db.query(command, args)
        self._do_debug(command)
        return db.query(command)

    def query_formatted(self, command: str,
                        parameters: tuple | list | dict | None = None,
                        types: tuple | list | dict | None = None,
                        inline: bool = False) -> Query:
        """Execute a formatted SQL command string.

        Similar to query, but using Python format placeholders of the form
        %s or %(names)s instead of PostgreSQL placeholders of the form $1.
        The parameters must be passed as a tuple, list or dict.  You can
        also pass a corresponding tuple, list or dict of database types in
        order to format the parameters properly in case there is ambiguity.

        If you set inline to True, the parameters will be sent to the
        database embedded in the SQL command, otherwise they will be
        sent separately.
        """
        return self.query(*self.adapter.format_query(
            command, parameters, types, inline))

    def query_prepared(self, name: str, *args: Any) -> Query:
        """Execute a prepared SQL statement.

        This works like the query() method, except that instead of passing
        the SQL command, you pass the name of a prepared statement.
        If you pass an empty name, the unnamed statement will be executed.
        """
        if name is None:
            name = ''
        db = self._valid_db
        if args:
            self._do_debug('EXECUTE', name, args)
            return db.query_prepared(name, args)
        self._do_debug('EXECUTE', name)
        return db.query_prepared(name)

    def prepare(self, name: str, command: str) -> None:
        """Create a prepared SQL statement.

        This creates a prepared statement for the given command with the
        given name for later execution with the query_prepared() method.
        The name can be empty to create an unnamed statement, in which case
        any pre-existing unnamed statement is automatically replaced;
        otherwise it is an error if the statement name is already defined
        in the current database session.  We recommend always using named
        queries, since unnamed queries have a limited lifetime and can be
        automatically replaced or destroyed by various operations.
        """
        if name is None:
            name = ''
        self._do_debug('prepare', name, command)
        self._valid_db.prepare(name, command)

    def describe_prepared(self, name: str | None = None) -> Query:
        """Describe a prepared SQL statement.

        This method returns a Query object describing the result columns
        of the prepared statement with the given name.  If you omit the
        name, the unnamed statement will be described if you created one
        before.
        """
        if name is None:
            name = ''
        return self._valid_db.describe_prepared(name)

    def delete_prepared(self, name: str | None = None) -> Query:
        """Delete a prepared SQL statement.

        This deallocates a previously prepared SQL statement with the given
        name, or deallocates all prepared statements if you do not specify
        a name.  Note that prepared statements are also deallocated
        automatically when the current session ends.
        """
        if not name:
            name = 'ALL'
        cmd = f"DEALLOCATE {name}"
        self._do_debug(cmd)
        return self._valid_db.query(cmd)

    def pkey(self, table: str, composite: bool = False, flush: bool = False
             ) -> str | tuple[str, ...]:
        """Get the primary key of a table.

        Single primary keys are returned as strings unless you set the
        composite flag.  Composite primary keys are always represented as
        tuples.  Note that this raises a KeyError if the table does not
        have a primary key.

        If flush is set then the internal cache for primary keys will
        be flushed.  This may be necessary after the database schema or
        the search path has been changed.
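
        Example (illustrative; the table 'customers' and its key
        column 'id' are hypothetical):

            db.pkey('customers')                  # 'id'
            db.pkey('customers', composite=True)  # ('id',)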
""" pkeys = self._pkeys if flush: pkeys.clear() self._do_debug('The pkey cache has been flushed') try: # cache lookup pkey = pkeys[table] except KeyError as e: # cache miss, check the database cmd = ("SELECT" # noqa: S608 " a.attname, a.attnum, i.indkey" " FROM pg_catalog.pg_index i" " JOIN pg_catalog.pg_attribute a" " ON a.attrelid OPERATOR(pg_catalog.=) i.indrelid" " AND a.attnum OPERATOR(pg_catalog.=) ANY(i.indkey)" " AND NOT a.attisdropped" " WHERE i.indrelid OPERATOR(pg_catalog.=)" " {}::pg_catalog.regclass" " AND i.indisprimary ORDER BY a.attnum").format( quote_if_unqualified('$1', table)) res = self._valid_db.query(cmd, (table,)).getresult() if not res: raise KeyError(f'Table {table} has no primary key') from e # we want to use the order defined in the primary key index here, # not the order as defined by the columns in the table if len(res) > 1: indkey = res[0][2] pkey = tuple(row[0] for row in sorted( res, key=lambda row: indkey.index(row[1]))) else: pkey = res[0][0] pkeys[table] = pkey # cache it if composite and not isinstance(pkey, tuple): pkey = (pkey,) return pkey def pkeys(self, table: str) -> tuple[str, ...]: """Get the primary key of a table as a tuple. Same as pkey() with 'composite' set to True. """ return self.pkey(table, True) # type: ignore def get_databases(self) -> list[str]: """Get list of databases in the system.""" return [r[0] for r in self._valid_db.query( 'SELECT datname FROM pg_catalog.pg_database').getresult()] def get_relations(self, kinds: str | Sequence[str] | None = None, system: bool = False) -> list[str]: """Get list of relations in connected database of specified kinds. If kinds is None or empty, all kinds of relations are returned. Otherwise, kinds can be a string or sequence of type letters specifying which kind of relations you want to list. Set the system flag if you want to get the system relations as well. """ where_parts = [] if kinds: where_parts.append( "r.relkind IN ({})".format(','.join(f"'{k}'" for k in kinds))) if not system: where_parts.append("s.nspname NOT SIMILAR" " TO 'pg/_%|information/_schema' ESCAPE '/'") where = " WHERE " + ' AND '.join(where_parts) if where_parts else '' cmd = ("SELECT" # noqa: S608 " pg_catalog.quote_ident(s.nspname) OPERATOR(pg_catalog.||)" " '.' OPERATOR(pg_catalog.||) pg_catalog.quote_ident(r.relname)" " FROM pg_catalog.pg_class r" " JOIN pg_catalog.pg_namespace s" f" ON s.oid OPERATOR(pg_catalog.=) r.relnamespace{where}" " ORDER BY s.nspname, r.relname") return [r[0] for r in self._valid_db.query(cmd).getresult()] def get_tables(self, system: bool = False) -> list[str]: """Return list of tables in connected database. Set the system flag if you want to get the system tables as well. """ return self.get_relations('r', system) def get_attnames(self, table: str, with_oid: bool=True, flush: bool=False ) -> AttrDict: """Given the name of a table, dig out the set of attribute names. Returns a read-only dictionary of attribute names (the names are the keys, the values are the names of the attributes' types) with the column names in the proper order if you iterate over it. If flush is set, then the internal cache for attribute names will be flushed. This may be necessary after the database schema or the search path has been changed. By default, only a limited number of simple types will be returned. You can get the registered types after calling use_regtypes(True). 
""" attnames = self._attnames if flush: attnames.clear() self._do_debug('The attnames cache has been flushed') try: # cache lookup names = attnames[table] except KeyError: # cache miss, check the database cmd = "a.attnum OPERATOR(pg_catalog.>) 0" if with_oid: cmd = f"({cmd} OR a.attname OPERATOR(pg_catalog.=) 'oid')" cmd = self._query_attnames.format( quote_if_unqualified('$1', table), cmd) res = self._valid_db.query(cmd, (table,)).getresult() types = self.dbtypes names = AttrDict((name[0], types.add(*name[1:])) for name in res) attnames[table] = names # cache it return names def get_generated(self, table: str, flush: bool = False) -> frozenset[str]: """Given the name of a table, dig out the set of generated columns. Returns a set of column names that are generated and unalterable. If flush is set, then the internal cache for generated columns will be flushed. This may be necessary after the database schema or the search path has been changed. """ generated = self._generated if flush: generated.clear() self._do_debug('The generated cache has been flushed') try: # cache lookup names = generated[table] except KeyError: # cache miss, check the database cmd = "a.attnum OPERATOR(pg_catalog.>) 0" cmd = f"{cmd} AND {self._query_generated}" cmd = self._query_attnames.format( quote_if_unqualified('$1', table), cmd) res = self._valid_db.query(cmd, (table,)).getresult() names = frozenset(name[0] for name in res) generated[table] = names # cache it return names def use_regtypes(self, regtypes: bool | None = None) -> bool: """Use registered type names instead of simplified type names.""" if regtypes is None: return self.dbtypes._regtypes regtypes = bool(regtypes) if regtypes != self.dbtypes._regtypes: self.dbtypes._regtypes = regtypes self._attnames.clear() self.dbtypes.clear() return regtypes def has_table_privilege(self, table: str, privilege: str = 'select', flush: bool = False) -> bool: """Check whether current user has specified table privilege. If flush is set, then the internal cache for table privileges will be flushed. This may be necessary after privileges have been changed. """ privileges = self._privileges if flush: privileges.clear() self._do_debug('The privileges cache has been flushed') privilege = privilege.lower() try: # ask cache ret = privileges[table, privilege] except KeyError: # cache miss, ask the database cmd = "SELECT pg_catalog.has_table_privilege({}, $2)".format( quote_if_unqualified('$1', table)) query = self._valid_db.query(cmd, (table, privilege)) ret = query.singlescalar() == self._make_bool(True) privileges[table, privilege] = ret # cache it return ret def get(self, table: str, row: Any, keyname: str | tuple[str, ...] | None = None) -> dict[str, Any]: """Get a row from a database table or view. This method is the basic mechanism to get a single row. It assumes that the keyname specifies a unique row. It must be the name of a single column or a tuple of column names. If the keyname is not specified, then the primary key for the table is used. If row is a dictionary, then the value for the key is taken from it. Otherwise, the row must be a single value or a tuple of values corresponding to the passed keyname or primary key. The fetched row from the table will be returned as a new dictionary or used to replace the existing values when row was passed as a dictionary. The OID is also put into the dictionary if the table has one, but in order to allow the caller to work with multiple tables, it is munged as "oid(table)" using the actual name of the table. 
""" if table.endswith('*'): # hint for descendant tables can be ignored table = table[:-1].rstrip() attnames = self.get_attnames(table) qoid = oid_key(table) if 'oid' in attnames else None if keyname and isinstance(keyname, str): keyname = (keyname,) if qoid and isinstance(row, dict) and qoid in row and 'oid' not in row: row['oid'] = row[qoid] if not keyname: try: # if keyname is not specified, try using the primary key keyname = self.pkeys(table) except KeyError as e: # the table has no primary key # try using the oid instead if qoid and isinstance(row, dict) and 'oid' in row: keyname = ('oid',) else: raise prg_error( f'Table {table} has no primary key') from e else: # the table has a primary key # check whether all key columns have values if isinstance(row, dict) and not set(keyname).issubset(row): # try using the oid instead if qoid and 'oid' in row: keyname = ('oid',) else: raise KeyError( 'Missing value in row for specified keyname') if not isinstance(row, dict): if not isinstance(row, (tuple, list)): row = [row] if len(keyname) != len(row): raise KeyError( 'Differing number of items in keyname and row') row = dict(zip(keyname, row)) params = self.adapter.parameter_list() adapt = params.add col = self.escape_identifier what = 'oid, *' if qoid else '*' where = ' AND '.join('{} OPERATOR(pg_catalog.=) {}'.format( col(k), adapt(row[k], attnames[k])) for k in keyname) if 'oid' in row: if qoid: row[qoid] = row['oid'] del row['oid'] t = self._escape_qualified_name(table) cmd = f'SELECT {what} FROM {t} WHERE {where} LIMIT 1' # noqa: S608s self._do_debug(cmd, params) query = self._valid_db.query(cmd, params) res = query.dictresult() if not res: # make where clause in error message better readable where = where.replace('OPERATOR(pg_catalog.=)', '=') raise db_error( f'No such record in {table}\nwhere {where}\nwith ' + self._list_params(params)) for n, value in res[0].items(): if qoid and n == 'oid': n = qoid row[n] = value return row def insert(self, table: str, row: dict[str, Any] | None = None, **kw: Any ) -> dict[str, Any]: """Insert a row into a database table. This method inserts a row into a table. The name of the table must be passed as the first parameter. The other parameters are used for providing the data of the row that shall be inserted into the table. If a dictionary is supplied as the second parameter, it starts with that. Otherwise, it uses a blank dictionary. Either way the dictionary is updated from the keywords. The dictionary is then reloaded with the values actually inserted in order to pick up values modified by rules, triggers, etc. 
""" if table.endswith('*'): # hint for descendant tables can be ignored table = table[:-1].rstrip() if row is None: row = {} row.update(kw) if 'oid' in row: del row['oid'] # do not insert oid attnames = self.get_attnames(table) generated = self.get_generated(table) qoid = oid_key(table) if 'oid' in attnames else None params = self.adapter.parameter_list() adapt = params.add col = self.escape_identifier name_list, value_list = [], [] for n in attnames: if n in row and n not in generated: name_list.append(col(n)) value_list.append(adapt(row[n], attnames[n])) if not name_list: raise prg_error('No column found that can be inserted') names, values = ', '.join(name_list), ', '.join(value_list) ret = 'oid, *' if qoid else '*' t = self._escape_qualified_name(table) cmd = (f'INSERT INTO {t} ({names})' # noqa: S608 f' VALUES ({values}) RETURNING {ret}') self._do_debug(cmd, params) query = self._valid_db.query(cmd, params) res = query.dictresult() if res: # this should always be true for n, value in res[0].items(): if qoid and n == 'oid': n = qoid row[n] = value return row def update(self, table: str, row: dict[str, Any] | None = None, **kw : Any ) -> dict[str, Any]: """Update an existing row in a database table. Similar to insert, but updates an existing row. The update is based on the primary key of the table or the OID value as munged by get() or passed as keyword. The OID will take precedence if provided, so that it is possible to update the primary key itself. The dictionary is then modified to reflect any changes caused by the update due to triggers, rules, default values, etc. """ if table.endswith('*'): table = table[:-1].rstrip() # need parent table name attnames = self.get_attnames(table) generated = self.get_generated(table) qoid = oid_key(table) if 'oid' in attnames else None if row is None: row = {} elif 'oid' in row: del row['oid'] # only accept oid key from named args for safety row.update(kw) if qoid and qoid in row and 'oid' not in row: row['oid'] = row[qoid] if qoid and 'oid' in row: # try using the oid keynames: tuple[str, ...] = ('oid',) keyset = set(keynames) else: # try using the primary key try: keynames = self.pkeys(table) except KeyError as e: # the table has no primary key raise prg_error(f'Table {table} has no primary key') from e keyset = set(keynames) # check whether all key columns have values if not keyset.issubset(row): raise KeyError('Missing value for primary key in row') params = self.adapter.parameter_list() adapt = params.add col = self.escape_identifier where = ' AND '.join('{} OPERATOR(pg_catalog.=) {}'.format( col(k), adapt(row[k], attnames[k])) for k in keynames) if 'oid' in row: if qoid: row[qoid] = row['oid'] del row['oid'] values_list = [] for n in attnames: if n in row and n not in keyset and n not in generated: values_list.append(f'{col(n)} = {adapt(row[n], attnames[n])}') if not values_list: return row values = ', '.join(values_list) ret = 'oid, *' if qoid else '*' t = self._escape_qualified_name(table) cmd = (f'UPDATE {t} SET {values}' # noqa: S608 f' WHERE {where} RETURNING {ret}') self._do_debug(cmd, params) query = self._valid_db.query(cmd, params) res = query.dictresult() if res: # may be empty when row does not exist for n, value in res[0].items(): if qoid and n == 'oid': n = qoid row[n] = value return row def upsert(self, table: str, row: dict[str, Any] | None = None, **kw: Any ) -> dict[str, Any]: """Insert a row into a database table with conflict resolution. 
        This method inserts a row into a table, but instead of raising a
        ProgrammingError exception in case a row with the same primary key
        already exists, an update will be executed instead.  This will be
        performed as a single atomic operation on the database, so race
        conditions can be avoided.

        Like the insert method, the first parameter is the name of the
        table and the second parameter can be used to pass the values to
        be inserted as a dictionary.

        Unlike the insert and update methods, keyword parameters are not
        used to modify the dictionary, but to specify which columns shall
        be updated in case of a conflict, and in which way:

        A value of False or None means the column shall not be updated,
        a value of True means the column shall be updated with the value
        that has been proposed for insertion, i.e. has been passed as value
        in the dictionary.  Columns that are not specified by keywords but
        appear as keys in the dictionary are also updated, just as if the
        keywords had been passed with the value True.

        So if in the case of a conflict you want to update every column
        that has been passed in the dictionary row, you would call
        upsert(table, row).  If you don't want to do anything in case of
        a conflict, i.e. leave the existing row as it is, call
        upsert(table, row, **dict.fromkeys(row)).

        If you need more fine-grained control of what gets updated, you
        can also pass strings in the keyword parameters.  These strings
        will be used as SQL expressions for the update columns.  In these
        expressions you can refer to the value that already exists in the
        table by prefixing the column name with "included.", and to the
        value that has been proposed for insertion by prefixing the column
        name with "excluded.".

        The dictionary is modified in any case to reflect the values in
        the database after the operation has completed.

        Note: The method uses the PostgreSQL "upsert" feature which is
        only available since PostgreSQL 9.5.
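
        Example (an illustrative sketch with a hypothetical table
        'inventory' that has the primary key column 'item'):

            row = {'item': 'apple', 'count': 1}
            db.upsert('inventory', row)  # insert, or update all columns
            db.upsert('inventory', row, **dict.fromkeys(row))  # do nothing
            db.upsert('inventory', row,
                      count='included.count + excluded.count')  # add counts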
""" if table.endswith('*'): # hint for descendant tables can be ignored table = table[:-1].rstrip() if row is None: row = {} if 'oid' in row: del row['oid'] # do not insert oid if 'oid' in kw: del kw['oid'] # do not update oid attnames = self.get_attnames(table) generated = self.get_generated(table) qoid = oid_key(table) if 'oid' in attnames else None params = self.adapter.parameter_list() adapt = params.add col = self.escape_identifier name_list, value_list = [], [] for n in attnames: if n in row and n not in generated: name_list.append(col(n)) value_list.append(adapt(row[n], attnames[n])) names, values = ', '.join(name_list), ', '.join(value_list) try: keynames = self.pkeys(table) except KeyError as e: raise prg_error(f'Table {table} has no primary key') from e target = ', '.join(col(k) for k in keynames) update = [] keyset = set(keynames) keyset.add('oid') for n in attnames: if n not in keyset and n not in generated: value = kw.get(n, n in row) if value: if not isinstance(value, str): value = f'excluded.{col(n)}' update.append(f'{col(n)} = {value}') if not values: return row do = 'update set ' + ', '.join(update) if update else 'nothing' ret = 'oid, *' if qoid else '*' t = self._escape_qualified_name(table) cmd = (f'INSERT INTO {t} AS included ({names})' # noqa: S608 f' VALUES ({values})' f' ON CONFLICT ({target}) DO {do} RETURNING {ret}') self._do_debug(cmd, params) query = self._valid_db.query(cmd, params) res = query.dictresult() if res: # may be empty with "do nothing" for n, value in res[0].items(): if qoid and n == 'oid': n = qoid row[n] = value else: self.get(table, row) return row def clear(self, table: str, row: dict[str, Any] | None = None ) -> dict[str, Any]: """Clear all the attributes to values determined by the types. Numeric types are set to 0, Booleans are set to false, and everything else is set to the empty string. If the row argument is present, it is used as the row dictionary and any entries matching attribute names are cleared with everything else left unchanged. """ # At some point we will need a way to get defaults from a table. if row is None: row = {} # empty if argument is not present attnames = self.get_attnames(table) for n, t in attnames.items(): if n == 'oid': continue t = t.simple if t in DbTypes._num_types: row[n] = 0 elif t == 'bool': row[n] = self._make_bool(False) else: row[n] = '' return row def delete(self, table: str, row: dict[str, Any] | None = None, **kw: Any ) -> int: """Delete an existing row in a database table. This method deletes the row from a table. It deletes based on the primary key of the table or the OID value as munged by get() or passed as keyword. The OID will take precedence if provided. The return value is the number of deleted rows (i.e. 0 if the row did not exist and 1 if the row was deleted). Note that if the row cannot be deleted because e.g. it is still referenced by another table, this method raises a ProgrammingError. """ if table.endswith('*'): # hint for descendant tables can be ignored table = table[:-1].rstrip() attnames = self.get_attnames(table) qoid = oid_key(table) if 'oid' in attnames else None if row is None: row = {} elif 'oid' in row: del row['oid'] # only accept oid key from named args for safety row.update(kw) if qoid and qoid in row and 'oid' not in row: row['oid'] = row[qoid] if qoid and 'oid' in row: # try using the oid keynames: tuple[str, ...] 
= ('oid',) else: # try using the primary key try: keynames = self.pkeys(table) except KeyError as e: # the table has no primary key raise prg_error(f'Table {table} has no primary key') from e # check whether all key columns have values if not set(keynames).issubset(row): raise KeyError('Missing value for primary key in row') params = self.adapter.parameter_list() adapt = params.add col = self.escape_identifier where = ' AND '.join('{} OPERATOR(pg_catalog.=) {}'.format( col(k), adapt(row[k], attnames[k])) for k in keynames) if 'oid' in row: if qoid: row[qoid] = row['oid'] del row['oid'] t = self._escape_qualified_name(table) cmd = f'DELETE FROM {t} WHERE {where}' # noqa: S608 self._do_debug(cmd, params) res = self._valid_db.query(cmd, params) return int(res) # type: ignore def truncate(self, table: str | list[str] | tuple[str, ...] | set[str] | frozenset[str], restart: bool = False, cascade: bool = False, only: bool = False) -> Query: """Empty a table or set of tables. This method quickly removes all rows from the given table or set of tables. It has the same effect as an unqualified DELETE on each table, but since it does not actually scan the tables it is faster. Furthermore, it reclaims disk space immediately, rather than requiring a subsequent VACUUM operation. This is most useful on large tables. If restart is set to True, sequences owned by columns of the truncated table(s) are automatically restarted. If cascade is set to True, it also truncates all tables that have foreign-key references to any of the named tables. If the parameter 'only' is not set to True, all the descendant tables (if any) will also be truncated. Optionally, a '*' can be specified after the table name to explicitly indicate that descendant tables are included. """ if isinstance(table, str): table_only = {table: only} table = [table] elif isinstance(table, (list, tuple)): if isinstance(only, (list, tuple)): table_only = dict(zip(table, only)) else: table_only = dict.fromkeys(table, only) elif isinstance(table, (set, frozenset)): table_only = dict.fromkeys(table, only) else: raise TypeError('The table must be a string, list or set') if not (restart is None or isinstance(restart, (bool, int))): raise TypeError('Invalid type for the restart option') if not (cascade is None or isinstance(cascade, (bool, int))): raise TypeError('Invalid type for the cascade option') tables = [] for t in table: u = table_only.get(t) if not (u is None or isinstance(u, (bool, int))): raise TypeError('Invalid type for the only option') if t.endswith('*'): if u: raise ValueError( 'Contradictory table name and only options') t = t[:-1].rstrip() t = self._escape_qualified_name(t) if u: t = f'ONLY {t}' tables.append(t) cmd_parts = ['TRUNCATE', ', '.join(tables)] if restart: cmd_parts.append('RESTART IDENTITY') if cascade: cmd_parts.append('CASCADE') cmd = ' '.join(cmd_parts) self._do_debug(cmd) return self._valid_db.query(cmd) def get_as_list( self, table: str, what: str | list[str] | tuple[str, ...] | None = None, where: str | list[str] | tuple[str, ...] | None = None, order: str | list[str] | tuple[str, ...] | bool | None = None, limit: int | None = None, offset: int | None = None, scalar: bool = False) -> list: """Get a table as a list. This gets a convenient representation of the table as a list of named tuples in Python. You only need to pass the name of the table (or any other SQL expression returning rows). Note that by default this will return the full content of the table which can be huge and overflow your memory. 
However, you can control the amount of data returned using the other optional parameters. The parameter 'what' can restrict the query to only return a subset of the table columns. It can be a string, list or a tuple. The parameter 'where' can restrict the query to only return a subset of the table rows. It can be a string, list or a tuple of SQL expressions that all need to be fulfilled. The parameter 'order' specifies the ordering of the rows. It can also be a string, list or a tuple. If no ordering is specified, the result will be ordered by the primary key(s) or all columns if no primary key exists. You can set 'order' to False if you don't care about the ordering. The parameters 'limit' and 'offset' can be integers specifying the maximum number of rows returned and a number of rows skipped over. If you set the 'scalar' option to True, then instead of the named tuples you will get the first items of these tuples. This is useful if the result has only one column anyway. """ if not table: raise TypeError('The table name is missing') if what: if isinstance(what, (list, tuple)): what = ', '.join(map(str, what)) if order is None: order = what else: what = '*' cmd_parts = ['SELECT', what, 'FROM', table] if where: if isinstance(where, (list, tuple)): where = ' AND '.join(map(str, where)) cmd_parts.extend(['WHERE', where]) if order is None or order is True: try: order = self.pkeys(table) except (KeyError, ProgrammingError): with suppress(KeyError, ProgrammingError): order = list(self.get_attnames(table)) if order and not isinstance(order, bool): if isinstance(order, (list, tuple)): order = ', '.join(map(str, order)) cmd_parts.extend(['ORDER BY', order]) if limit: cmd_parts.append(f'LIMIT {limit}') if offset: cmd_parts.append(f'OFFSET {offset}') cmd = ' '.join(cmd_parts) self._do_debug(cmd) query = self._valid_db.query(cmd) res = query.namedresult() if res and scalar: res = [row[0] for row in res] return res def get_as_dict( self, table: str, keyname: str | list[str] | tuple[str, ...] | None = None, what: str | list[str] | tuple[str, ...] | None = None, where: str | list[str] | tuple[str, ...] | None = None, order: str | list[str] | tuple[str, ...] | bool | None = None, limit: int | None = None, offset: int | None = None, scalar: bool = False) -> dict: """Get a table as a dictionary. This method is similar to get_as_list(), but returns the table as a Python dict instead of a Python list, which can be even more convenient. The primary key column(s) of the table will be used as the keys of the dictionary, while the other column(s) will be the corresponding values. The keys will be named tuples if the table has a composite primary key. The rows will be also named tuples unless the 'scalar' option has been set to True. With the optional parameter 'keyname' you can specify an alternative set of columns to be used as the keys of the dictionary. It must be set as a string, list or a tuple. The dictionary will be ordered using the order specified with the 'order' parameter or the key column(s) if not specified. You can set 'order' to False if you don't care about the ordering. """ if not table: raise TypeError('The table name is missing') if not keyname: try: keyname = self.pkeys(table) except (KeyError, ProgrammingError) as e: raise prg_error(f'Table {table} has no primary key') from e if isinstance(keyname, str): keynames: list[str] | tuple[str, ...] 
= (keyname,) elif isinstance(keyname, (list, tuple)): keynames = keyname else: raise KeyError('The keyname must be a string, list or tuple') if what: if isinstance(what, (list, tuple)): what = ', '.join(map(str, what)) if order is None: order = what else: what = '*' cmd_parts = ['SELECT', what, 'FROM', table] if where: if isinstance(where, (list, tuple)): where = ' AND '.join(map(str, where)) cmd_parts.extend(['WHERE', where]) if order is None or order is True: order = keyname if order and not isinstance(order, bool): if isinstance(order, (list, tuple)): order = ', '.join(map(str, order)) cmd_parts.extend(['ORDER BY', order]) if limit: cmd_parts.append(f'LIMIT {limit}') if offset: cmd_parts.append(f'OFFSET {offset}') cmd = ' '.join(cmd_parts) self._do_debug(cmd) query = self._valid_db.query(cmd) res = query.getresult() if not res: return {} keyset = set(keynames) fields = query.listfields() if not keyset.issubset(fields): raise KeyError('Missing keyname in row') key_index: list[int] = [] row_index: list[int] = [] for i, f in enumerate(fields): (key_index if f in keyset else row_index).append(i) key_tuple = len(key_index) > 1 get_key = itemgetter(*key_index) keys = map(get_key, res) if scalar: row_index = row_index[:1] row_is_tuple = False else: row_is_tuple = len(row_index) > 1 if scalar or row_is_tuple: get_row: Callable[[tuple], tuple] = itemgetter( # pyright: ignore *row_index) else: frst_index = row_index[0] def get_row(row : tuple) -> tuple: return row[frst_index], # tuple with one item row_is_tuple = True rows = map(get_row, res) if key_tuple or row_is_tuple: if key_tuple: keys = namediter(_MemoryQuery(keys, keynames)) # type: ignore if row_is_tuple: fields = tuple(f for f in fields if f not in keyset) rows = namediter(_MemoryQuery(rows, fields)) # type: ignore # noinspection PyArgumentList return dict(zip(keys, rows)) def notification_handler(self, event: str, callback: Callable, arg_dict: dict | None = None, timeout: int | float | None = None, stop_event: str | None = None ) -> NotificationHandler: """Get notification handler that will run the given callback.""" return NotificationHandler(self, event, callback, arg_dict, timeout, stop_event) # immediately wrapped methods def send_query(self, cmd: str, args: Sequence | None = None) -> Query: """Create a new asynchronous query object for this connection.""" if args is None: return self._valid_db.send_query(cmd) return self._valid_db.send_query(cmd, args) def poll(self) -> int: """Complete an asynchronous connection and get its state.""" return self._valid_db.poll() def cancel(self) -> None: """Abandon processing of current SQL command.""" self._valid_db.cancel() def fileno(self) -> int: """Get the socket used to connect to the database.""" return self._valid_db.fileno() def get_cast_hook(self) -> Callable | None: """Get the function that handles all external typecasting.""" return self._valid_db.get_cast_hook() def set_cast_hook(self, hook: Callable | None) -> None: """Set a function that will handle all external typecasting.""" self._valid_db.set_cast_hook(hook) def get_notice_receiver(self) -> Callable | None: """Get the current notice receiver.""" return self._valid_db.get_notice_receiver() def set_notice_receiver(self, receiver: Callable | None) -> None: """Set a custom notice receiver.""" self._valid_db.set_notice_receiver(receiver) def getnotify(self) -> tuple[str, int, str] | None: """Get the last notify from the server.""" return self._valid_db.getnotify() def inserttable(self, table: str, values: Sequence[list|tuple], 
columns: list[str] | tuple[str, ...] | None = None) -> int: """Insert a Python iterable into a database table.""" if columns is None: return self._valid_db.inserttable(table, values) return self._valid_db.inserttable(table, values, columns) def transaction(self) -> int: """Get the current in-transaction status of the server. The status returned by this method can be TRANS_IDLE (currently idle), TRANS_ACTIVE (a command is in progress), TRANS_INTRANS (idle, in a valid transaction block), or TRANS_INERROR (idle, in a failed transaction block). TRANS_UNKNOWN is reported if the connection is bad. The status TRANS_ACTIVE is reported only when a query has been sent to the server and not yet completed. """ return self._valid_db.transaction() def parameter(self, name: str) -> str | None: """Look up a current parameter setting of the server.""" return self._valid_db.parameter(name) def date_format(self) -> str: """Look up the date format currently being used by the database.""" return self._valid_db.date_format() def escape_literal(self, s: AnyStr) -> AnyStr: """Escape a literal constant for use within SQL.""" return self._valid_db.escape_literal(s) def escape_identifier(self, s: AnyStr) -> AnyStr: """Escape an identifier for use within SQL.""" return self._valid_db.escape_identifier(s) def escape_string(self, s: AnyStr) -> AnyStr: """Escape a string for use within SQL.""" return self._valid_db.escape_string(s) def escape_bytea(self, s: AnyStr) -> AnyStr: """Escape binary data for use within SQL as type 'bytea'.""" return self._valid_db.escape_bytea(s) def putline(self, line: str) -> None: """Write a line to the server socket.""" self._valid_db.putline(line) def getline(self) -> str: """Get a line from server socket.""" return self._valid_db.getline() def endcopy(self) -> None: """Synchronize client and server.""" self._valid_db.endcopy() def set_non_blocking(self, nb: bool) -> None: """Set the non-blocking mode of the connection.""" self._valid_db.set_non_blocking(nb) def is_non_blocking(self) -> bool: """Get the non-blocking mode of the connection.""" return self._valid_db.is_non_blocking() def locreate(self, mode: int) -> LargeObject: """Create a large object in the database. The valid values for 'mode' parameter are defined as the module level constants INV_READ and INV_WRITE. """ return self._valid_db.locreate(mode) def getlo(self, oid: int) -> LargeObject: """Build a large object from given oid.""" return self._valid_db.getlo(oid) def loimport(self, filename: str) -> LargeObject: """Import a file to a large object.""" return self._valid_db.loimport(filename) class _MemoryQuery: """Class that embodies a given query result.""" result: Any fields: tuple[str, ...] 
def __init__(self, result: Any, fields: Sequence[str]) -> None: """Create query from given result rows and field names.""" self.result = result self.fields = tuple(fields) def listfields(self) -> tuple[str, ...]: """Return the stored field names of this query.""" return self.fields def getresult(self) -> Any: """Return the stored result of this query.""" return self.result def __iter__(self) -> Iterator[Any]: return iter(self.result)PyGreSQL-PyGreSQL-166b135/pg/error.py000066400000000000000000000022131450706350600171120ustar00rootroot00000000000000"""Error helpers.""" from __future__ import annotations from typing import TypeVar from .core import ( DatabaseError, Error, InterfaceError, InternalError, OperationalError, ProgrammingError, ) __all__ = [ 'error', 'db_error', 'if_error', 'int_error', 'op_error', 'prg_error' ] # Error messages E = TypeVar('E', bound=Error) def error(msg: str, cls: type[E]) -> E: """Return specified error object with empty sqlstate attribute.""" error = cls(msg) if isinstance(error, DatabaseError): error.sqlstate = None return error def db_error(msg: str) -> DatabaseError: """Return DatabaseError.""" return error(msg, DatabaseError) def int_error(msg: str) -> InternalError: """Return InternalError.""" return error(msg, InternalError) def prg_error(msg: str) -> ProgrammingError: """Return ProgrammingError.""" return error(msg, ProgrammingError) def if_error(msg: str) -> InterfaceError: """Return InterfaceError.""" return error(msg, InterfaceError) def op_error(msg: str) -> OperationalError: """Return OperationalError.""" return error(msg, OperationalError) PyGreSQL-PyGreSQL-166b135/pg/helpers.py000066400000000000000000000072201450706350600174260ustar00rootroot00000000000000"""Helper functions.""" from __future__ import annotations from collections import namedtuple from decimal import Decimal from functools import lru_cache from json import loads as jsondecode from typing import Any, Callable, Generator, NamedTuple, Sequence from .core import Query, set_decimal, set_jsondecode, set_query_helpers SomeNamedTuple = Any # alias for accessing arbitrary named tuples __all__ = [ 'quote_if_unqualified', 'oid_key', 'QuoteDict', 'RowCache', 'dictiter', 'namediter', 'namednext', 'scalariter' ] # Small helper functions def quote_if_unqualified(param: str, name: int | str) -> str: """Quote parameter representing a qualified name. Puts a quote_ident() call around the given parameter unless the name contains a dot, in which case the name is ambiguous (could be a qualified name or just a name with a dot in it) and must be quoted manually by the caller. """ if isinstance(name, str) and '.' not in name: return f'quote_ident({param})' return param def oid_key(table: str) -> str: """Build oid key from a table name.""" return f'oid({table})' class QuoteDict(dict): """Dictionary with auto quoting of its items. The quote attribute must be set to the desired quote function. """ quote: Callable[[str], str] def __getitem__(self, key: str) -> str: """Get a quoted value.""" return self.quote(super().__getitem__(key)) class RowCache: """Global cache for the named tuples used for table rows. The result rows for database operations are returned as named tuples by default. Since creating namedtuple classes is a somewhat expensive operation, we cache up to 1024 of these classes by default. 
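
    Example (illustrative only):

        RowCache.change_size(4096)  # cache more named tuple classes
        RowCache.change_size(None)  # let the cache grow without bound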
""" @staticmethod @lru_cache(maxsize=1024) def row_factory(names: Sequence[str]) -> Callable[[Sequence], NamedTuple]: """Get a namedtuple factory for row results with the given names.""" try: return namedtuple('Row', names, rename=True)._make # type: ignore except ValueError: # there is still a problem with the field names names = [f'column_{n}' for n in range(len(names))] return namedtuple('Row', names)._make # type: ignore @classmethod def clear(cls) -> None: """Clear the namedtuple factory cache.""" cls.row_factory.cache_clear() @classmethod def change_size(cls, maxsize: int | None) -> None: """Change the size of the namedtuple factory cache. If maxsize is set to None, the cache can grow without bound. """ row_factory = cls.row_factory.__wrapped__ cls.row_factory = lru_cache(maxsize)(row_factory) # type: ignore # Helper functions used by the query object def dictiter(q: Query) -> Generator[dict[str, Any], None, None]: """Get query result as an iterator of dictionaries.""" fields: tuple[str, ...] = q.listfields() for r in q: yield dict(zip(fields, r)) def namediter(q: Query) -> Generator[SomeNamedTuple, None, None]: """Get query result as an iterator of named tuples.""" row = RowCache.row_factory(q.listfields()) for r in q: yield row(r) def namednext(q: Query) -> SomeNamedTuple: """Get next row from query result as a named tuple.""" return RowCache.row_factory(q.listfields())(next(q)) def scalariter(q: Query) -> Generator[Any, None, None]: """Get query result as an iterator of scalar values.""" for r in q: yield r[0] # Initialization def init_core() -> None: """Initialize the C extension module.""" set_decimal(Decimal) set_jsondecode(jsondecode) set_query_helpers(dictiter, namediter, namednext, scalariter) PyGreSQL-PyGreSQL-166b135/pg/notify.py000066400000000000000000000131251450706350600172750ustar00rootroot00000000000000"""Handling of notifications.""" from __future__ import annotations import select from typing import TYPE_CHECKING, Callable from .core import Query from .error import db_error if TYPE_CHECKING: from .db import DB __all__ = ['NotificationHandler'] # The notification handler class NotificationHandler: """A PostgreSQL client-side asynchronous notification handler.""" def __init__(self, db: DB, event: str, callback: Callable, arg_dict: dict | None = None, timeout: int | float | None = None, stop_event: str | None = None): """Initialize the notification handler. You must pass a PyGreSQL database connection, the name of an event (notification channel) to listen for and a callback function. You can also specify a dictionary arg_dict that will be passed as the single argument to the callback function, and a timeout value in seconds (a floating point number denotes fractions of seconds). If it is absent or None, the callers will never time out. If the timeout is reached, the callback function will be called with a single argument that is None. If you set the timeout to zero, the handler will poll notifications synchronously and return. You can specify the name of the event that will be used to signal the handler to stop listening as stop_event. By default, it will be the event name prefixed with 'stop_'. 
""" self.db: DB | None = db self.event = event self.stop_event = stop_event or f'stop_{event}' self.listening = False self.callback = callback if arg_dict is None: arg_dict = {} self.arg_dict = arg_dict self.timeout = timeout def __del__(self) -> None: """Delete the notification handler.""" self.unlisten() def close(self) -> None: """Stop listening and close the connection.""" if self.db: self.unlisten() self.db.close() self.db = None def listen(self) -> None: """Start listening for the event and the stop event.""" db = self.db if db and not self.listening: db.query(f'listen "{self.event}"') db.query(f'listen "{self.stop_event}"') self.listening = True def unlisten(self) -> None: """Stop listening for the event and the stop event.""" db = self.db if db and self.listening: db.query(f'unlisten "{self.event}"') db.query(f'unlisten "{self.stop_event}"') self.listening = False def notify(self, db: DB | None = None, stop: bool = False, payload: str | None = None) -> Query | None: """Generate a notification. Optionally, you can pass a payload with the notification. If you set the stop flag, a stop notification will be sent that will cause the handler to stop listening. Note: If the notification handler is running in another thread, you must pass a different database connection since PyGreSQL database connections are not thread-safe. """ if not self.listening: return None if not db: db = self.db if not db: return None event = self.stop_event if stop else self.event cmd = f'notify "{event}"' if payload: cmd += f", '{payload}'" return db.query(cmd) def __call__(self) -> None: """Invoke the notification handler. The handler is a loop that listens for notifications on the event and stop event channels. When either of these notifications are received, its associated 'pid', 'event' and 'extra' (the payload passed with the notification) are inserted into its arg_dict dictionary and the callback is invoked with this dictionary as a single argument. When the handler receives a stop event, it stops listening to both events and return. In the special case that the timeout of the handler has been set to zero, the handler will poll all events synchronously and return. If will keep listening until it receives a stop event. Note: If you run this loop in another thread, don't use the same database connection for database operations in the main thread. """ if not self.db: return self.listen() poll = self.timeout == 0 rlist = [] if poll else [self.db.fileno()] while self.db and self.listening: # noinspection PyUnboundLocalVariable if poll or select.select(rlist, [], [], self.timeout)[0]: while self.db and self.listening: notice = self.db.getnotify() if not notice: # no more messages break event, pid, extra = notice if event not in (self.event, self.stop_event): self.unlisten() raise db_error( f'Listening for "{self.event}"' f' and "{self.stop_event}",' f' but notified of "{event}"') if event == self.stop_event: self.unlisten() self.arg_dict.update(pid=pid, event=event, extra=extra) self.callback(self.arg_dict) if poll: break else: # we timed out self.unlisten() self.callback(None)PyGreSQL-PyGreSQL-166b135/pg/py.typed000066400000000000000000000001751450706350600171130ustar00rootroot00000000000000# Marker file for PEP 561. # The pg package use inline types, # except for the _pg extension module which uses a stub file. 
PyGreSQL-PyGreSQL-166b135/pg/tz.py000066400000000000000000000011471450706350600164230ustar00rootroot00000000000000"""Timezone helpers.""" from __future__ import annotations __all__ = ['timezone_as_offset'] # time zones used in Postgres timestamptz output _timezone_offsets: dict[str, str] = { 'CET': '+0100', 'EET': '+0200', 'EST': '-0500', 'GMT': '+0000', 'HST': '-1000', 'MET': '+0100', 'MST': '-0700', 'UCT': '+0000', 'UTC': '+0000', 'WET': '+0000' } def timezone_as_offset(tz: str) -> str: """Convert timezone abbreviation to offset.""" if tz.startswith(('+', '-')): if len(tz) < 5: return tz + '00' return tz.replace(':', '') return _timezone_offsets.get(tz, '+0000')PyGreSQL-PyGreSQL-166b135/pgdb/000077500000000000000000000000001450706350600157175ustar00rootroot00000000000000PyGreSQL-PyGreSQL-166b135/pgdb/__init__.py000066400000000000000000000073531450706350600200400ustar00rootroot00000000000000#!/usr/bin/python # # PyGreSQL - a Python interface for the PostgreSQL database. # # This file contains the DB-API 2 compatible pgdb module. # # Copyright (c) 2023 by the PyGreSQL Development Team # # Please see the LICENSE.TXT file for specific restrictions. """pgdb - DB-API 2.0 compliant module for PyGreSQL. (c) 1999, Pascal Andre . See package documentation for further information on copyright. Inline documentation is sparse. See DB-API 2.0 specification for usage information: http://www.python.org/peps/pep-0249.html Basic usage: pgdb.connect(connect_string) # open a connection # connect_string = 'host:database:user:password:opt' # All parts are optional. You may also pass host through # password as keyword arguments. To pass a port, # pass it in the host keyword parameter: connection = pgdb.connect(host='localhost:5432') cursor = connection.cursor() # open a cursor cursor.execute(query[, params]) # Execute a query, binding params (a dictionary) if they are # passed. The binding syntax is the same as the % operator # for dictionaries, and no quoting is done. cursor.executemany(query, list of params) # Execute a query many times, binding each param dictionary # from the list. cursor.fetchone() # fetch one row, [value, value, ...] cursor.fetchall() # fetch all rows, [[value, value, ...], ...] cursor.fetchmany([size]) # returns size or cursor.arraysize number of rows, # [[value, value, ...], ...] from result set. # Default cursor.arraysize is 1. cursor.description # returns information about the columns # [(column_name, type_name, display_size, # internal_size, precision, scale, null_ok), ...] # Note that display_size, precision, scale and null_ok # are not implemented. cursor.rowcount # number of rows available in the result set # Available after a call to execute. 
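    cursor.lastrowid
        # OID of the last inserted row, if available
        # (set after a call to execute, None for query results)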
connection.commit() # commit transaction connection.rollback() # or rollback transaction cursor.close() # close the cursor connection.close() # close the connection """ from pg.core import ( DatabaseError, DataError, Error, IntegrityError, InterfaceError, InternalError, NotSupportedError, OperationalError, ProgrammingError, Warning, version, ) from .adapt import ( ARRAY, BINARY, BOOL, DATE, DATETIME, FLOAT, HSTORE, INTEGER, INTERVAL, JSON, LONG, MONEY, NUMBER, NUMERIC, RECORD, ROWID, SMALLINT, STRING, TIME, TIMESTAMP, UUID, Binary, Date, DateFromTicks, DbType, Hstore, Interval, Json, Literal, Time, TimeFromTicks, Timestamp, TimestampFromTicks, Uuid, ) from .cast import get_typecast, reset_typecast, set_typecast from .connect import connect from .connection import Connection from .constants import apilevel, paramstyle, shortcutmethods, threadsafety from .cursor import Cursor __all__ = [ 'Connection', 'Cursor', 'Date', 'Time', 'Timestamp', 'DateFromTicks', 'TimeFromTicks', 'TimestampFromTicks', 'Binary', 'Interval', 'Uuid', 'Hstore', 'Json', 'Literal', 'DbType', 'STRING', 'BINARY', 'NUMBER', 'DATETIME', 'ROWID', 'BOOL', 'SMALLINT', 'INTEGER', 'LONG', 'FLOAT', 'NUMERIC', 'MONEY', 'DATE', 'TIME', 'TIMESTAMP', 'INTERVAL', 'UUID', 'HSTORE', 'JSON', 'ARRAY', 'RECORD', 'Error', 'Warning', 'InterfaceError', 'DatabaseError', 'DataError', 'OperationalError', 'IntegrityError', 'InternalError', 'ProgrammingError', 'NotSupportedError', 'get_typecast', 'set_typecast', 'reset_typecast', 'apilevel', 'connect', 'paramstyle', 'shortcutmethods', 'threadsafety', 'version', '__version__', ] __version__ = version PyGreSQL-PyGreSQL-166b135/pgdb/adapt.py000066400000000000000000000163111450706350600173640ustar00rootroot00000000000000"""Type helpers for adaptation of parameters.""" from __future__ import annotations from datetime import date, datetime, time, timedelta, tzinfo from json import dumps as jsonencode from re import compile as regex from time import localtime from typing import Any, Callable, Iterable from uuid import UUID as Uuid # noqa: N811 from .typecode import TypeCode __all__ = [ 'DbType', 'ArrayType', 'RecordType', 'STRING', 'BINARY', 'NUMBER', 'DATETIME', 'ROWID', 'BOOL', 'SMALLINT', 'INTEGER', 'LONG', 'FLOAT', 'NUMERIC', 'MONEY', 'DATE', 'TIME', 'TIMESTAMP', 'INTERVAL', 'UUID', 'HSTORE', 'JSON', 'ARRAY', 'RECORD', 'Date', 'Time', 'Timestamp', 'DateFromTicks', 'TimeFromTicks', 'TimestampFromTicks' ] class DbType(frozenset): """Type class for a couple of PostgreSQL data types. PostgreSQL is object-oriented: types are dynamic. We must thus use type names as internal type codes. 
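    For example (illustrative):

        NUMBER == 'int4'   # True, the name is in the set
        NUMBER == '_int4'  # True, array types match their base type
        NUMBER == 'text'   # False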
""" def __new__(cls, values: str | Iterable[str]) -> DbType: """Create new type object.""" if isinstance(values, str): values = values.split() return super().__new__(cls, values) # type: ignore def __eq__(self, other: Any) -> bool: """Check whether types are considered equal.""" if isinstance(other, str): if other.startswith('_'): other = other[1:] return other in self return super().__eq__(other) def __ne__(self, other: Any) -> bool: """Check whether types are not considered equal.""" if isinstance(other, str): if other.startswith('_'): other = other[1:] return other not in self return super().__ne__(other) class ArrayType: """Type class for PostgreSQL array types.""" def __eq__(self, other: Any) -> bool: """Check whether arrays are equal.""" if isinstance(other, str): return other.startswith('_') return isinstance(other, ArrayType) def __ne__(self, other: Any) -> bool: """Check whether arrays are different.""" if isinstance(other, str): return not other.startswith('_') return not isinstance(other, ArrayType) class RecordType: """Type class for PostgreSQL record types.""" def __eq__(self, other: Any) -> bool: """Check whether records are equal.""" if isinstance(other, TypeCode): return other.type == 'c' if isinstance(other, str): return other == 'record' return isinstance(other, RecordType) def __ne__(self, other: Any) -> bool: """Check whether records are different.""" if isinstance(other, TypeCode): return other.type != 'c' if isinstance(other, str): return other != 'record' return not isinstance(other, RecordType) # Mandatory type objects defined by DB-API 2 specs: STRING = DbType('char bpchar name text varchar') BINARY = DbType('bytea') NUMBER = DbType('int2 int4 serial int8 float4 float8 numeric money') DATETIME = DbType('date time timetz timestamp timestamptz interval' ' abstime reltime') # these are very old ROWID = DbType('oid') # Additional type objects (more specific): BOOL = DbType('bool') SMALLINT = DbType('int2') INTEGER = DbType('int2 int4 int8 serial') LONG = DbType('int8') FLOAT = DbType('float4 float8') NUMERIC = DbType('numeric') MONEY = DbType('money') DATE = DbType('date') TIME = DbType('time timetz') TIMESTAMP = DbType('timestamp timestamptz') INTERVAL = DbType('interval') UUID = DbType('uuid') HSTORE = DbType('hstore') JSON = DbType('json jsonb') # Type object for arrays (also equate to their base types): ARRAY = ArrayType() # Type object for records (encompassing all composite types): RECORD = RecordType() # Mandatory type helpers defined by DB-API 2 specs: def Date(year: int, month: int, day: int) -> date: # noqa: N802 """Construct an object holding a date value.""" return date(year, month, day) def Time(hour: int, minute: int = 0, # noqa: N802 second: int = 0, microsecond: int = 0, tzinfo: tzinfo | None = None) -> time: """Construct an object holding a time value.""" return time(hour, minute, second, microsecond, tzinfo) def Timestamp(year: int, month: int, day: int, # noqa: N802 hour: int = 0, minute: int = 0, second: int = 0, microsecond: int = 0, tzinfo: tzinfo | None = None) -> datetime: """Construct an object holding a time stamp value.""" return datetime(year, month, day, hour, minute, second, microsecond, tzinfo) def DateFromTicks(ticks: float | None) -> date: # noqa: N802 """Construct an object holding a date value from the given ticks value.""" return Date(*localtime(ticks)[:3]) def TimeFromTicks(ticks: float | None) -> time: # noqa: N802 """Construct an object holding a time value from the given ticks value.""" return Time(*localtime(ticks)[3:6]) def 
TimestampFromTicks(ticks: float | None) -> datetime: # noqa: N802 """Construct an object holding a time stamp from the given ticks value.""" return Timestamp(*localtime(ticks)[:6]) class Binary(bytes): """Construct an object capable of holding a binary (long) string value.""" # Additional type helpers for PyGreSQL: def Interval(days: int | float, # noqa: N802 hours: int | float = 0, minutes: int | float = 0, seconds: int | float = 0, microseconds: int | float = 0 ) -> timedelta: """Construct an object holding a time interval value.""" return timedelta(days, hours=hours, minutes=minutes, seconds=seconds, microseconds=microseconds) Uuid = Uuid # Construct an object holding a UUID value class Hstore(dict): """Wrapper class for marking hstore values.""" _re_quote = regex('^[Nn][Uu][Ll][Ll]$|[ ,=>]') _re_escape = regex(r'(["\\])') @classmethod def _quote(cls, s: Any) -> Any: if s is None: return 'NULL' if not isinstance(s, str): s = str(s) if not s: return '""' quote = cls._re_quote.search(s) s = cls._re_escape.sub(r'\\\1', s) if quote: s = f'"{s}"' return s def __str__(self) -> str: """Create a printable representation of the hstore value.""" q = self._quote return ','.join(f'{q(k)}=>{q(v)}' for k, v in self.items()) class Json: """Construct a wrapper for holding an object serializable to JSON.""" def __init__(self, obj: Any, encode: Callable[[Any], str] | None = None) -> None: """Initialize the JSON object.""" self.obj = obj self.encode = encode or jsonencode def __str__(self) -> str: """Create a printable representation of the JSON object.""" obj = self.obj if isinstance(obj, str): return obj return self.encode(obj) class Literal: """Construct a wrapper for holding a literal SQL string.""" def __init__(self, sql: str) -> None: """Initialize literal SQL string.""" self.sql = sql def __str__(self) -> str: """Return a printable representation of the SQL string.""" return self.sql __pg_repr__ = __str__PyGreSQL-PyGreSQL-166b135/pgdb/cast.py000066400000000000000000000502111450706350600172220ustar00rootroot00000000000000"""Internal type handling.""" from __future__ import annotations from collections import namedtuple from datetime import date, datetime, time, timedelta from decimal import Decimal as _Decimal from functools import partial from inspect import signature from json import loads as jsondecode from re import compile as regex from typing import Any, Callable, ClassVar, Sequence from uuid import UUID as Uuid # noqa: N811 from pg.core import Connection as Cnx from pg.core import ( ProgrammingError, cast_array, cast_hstore, cast_record, unescape_bytea, ) from .typecode import TypeCode __all__ = [ 'Decimal', 'decimal_type', 'cast_bool', 'cast_money', 'cast_int2vector', 'cast_date', 'cast_time', 'cast_interval', 'cast_timetz', 'cast_timestamp', 'cast_timestamptz', 'get_typecast', 'set_typecast', 'reset_typecast', 'Typecasts', 'LocalTypecasts', 'TypeCache', 'FieldInfo' ] Decimal: type = _Decimal def get_args(func: Callable) -> list: return list(signature(func).parameters) # time zones used in Postgres timestamptz output _timezones: dict[str, str] = { 'CET': '+0100', 'EET': '+0200', 'EST': '-0500', 'GMT': '+0000', 'HST': '-1000', 'MET': '+0100', 'MST': '-0700', 'UCT': '+0000', 'UTC': '+0000', 'WET': '+0000' } def _timezone_as_offset(tz: str) -> str: if tz.startswith(('+', '-')): if len(tz) < 5: return tz + '00' return tz.replace(':', '') return _timezones.get(tz, '+0000') def decimal_type(decimal_type: type | None = None) -> type: """Get or set global type to be used for decimal values. 
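    For example (an illustrative sketch), to get numeric values
    returned as floats instead of Decimal:

        decimal_type(float)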
Note that connections cache cast functions. To be sure a global change is picked up by a running connection, call con.type_cache.reset_typecast(). """ global Decimal if decimal_type is not None: Decimal = decimal_type set_typecast('numeric', decimal_type) return Decimal def cast_bool(value: str) -> bool | None: """Cast boolean value in database format to bool.""" return value[0] in ('t', 'T') if value else None def cast_money(value: str) -> _Decimal | None: """Cast money value in database format to Decimal.""" if not value: return None value = value.replace('(', '-') return Decimal(''.join(c for c in value if c.isdigit() or c in '.-')) def cast_int2vector(value: str) -> list[int]: """Cast an int2vector value.""" return [int(v) for v in value.split()] def cast_date(value: str, cnx: Cnx) -> date: """Cast a date value.""" # The output format depends on the server setting DateStyle. The default # setting ISO and the setting for German are actually unambiguous. The # order of days and months in the other two settings is however ambiguous, # so at least here we need to consult the setting to properly parse values. if value == '-infinity': return date.min if value == 'infinity': return date.max values = value.split() if values[-1] == 'BC': return date.min value = values[0] if len(value) > 10: return date.max format = cnx.date_format() return datetime.strptime(value, format).date() def cast_time(value: str) -> time: """Cast a time value.""" fmt = '%H:%M:%S.%f' if len(value) > 8 else '%H:%M:%S' return datetime.strptime(value, fmt).time() _re_timezone = regex('(.*)([+-].*)') def cast_timetz(value: str) -> time: """Cast a timetz value.""" m = _re_timezone.match(value) if m: value, tz = m.groups() else: tz = '+0000' format = '%H:%M:%S.%f' if len(value) > 8 else '%H:%M:%S' value += _timezone_as_offset(tz) format += '%z' return datetime.strptime(value, format).timetz() def cast_timestamp(value: str, cnx: Cnx) -> datetime: """Cast a timestamp value.""" if value == '-infinity': return datetime.min if value == 'infinity': return datetime.max values = value.split() if values[-1] == 'BC': return datetime.min format = cnx.date_format() if format.endswith('-%Y') and len(values) > 2: values = values[1:5] if len(values[3]) > 4: return datetime.max formats = ['%d %b' if format.startswith('%d') else '%b %d', '%H:%M:%S.%f' if len(values[2]) > 8 else '%H:%M:%S', '%Y'] else: if len(values[0]) > 10: return datetime.max formats = [format, '%H:%M:%S.%f' if len(values[1]) > 8 else '%H:%M:%S'] return datetime.strptime(' '.join(values), ' '.join(formats)) def cast_timestamptz(value: str, cnx: Cnx) -> datetime: """Cast a timestamptz value.""" if value == '-infinity': return datetime.min if value == 'infinity': return datetime.max values = value.split() if values[-1] == 'BC': return datetime.min format = cnx.date_format() if format.endswith('-%Y') and len(values) > 2: values = values[1:] if len(values[3]) > 4: return datetime.max formats = ['%d %b' if format.startswith('%d') else '%b %d', '%H:%M:%S.%f' if len(values[2]) > 8 else '%H:%M:%S', '%Y'] values, tz = values[:-1], values[-1] else: if format.startswith('%Y-'): m = _re_timezone.match(values[1]) if m: values[1], tz = m.groups() else: tz = '+0000' else: values, tz = values[:-1], values[-1] if len(values[0]) > 10: return datetime.max formats = [format, '%H:%M:%S.%f' if len(values[1]) > 8 else '%H:%M:%S'] values.append(_timezone_as_offset(tz)) formats.append('%z') return datetime.strptime(' '.join(values), ' '.join(formats)) _re_interval_sql_standard = regex( 
'(?:([+-])?([0-9]+)-([0-9]+) ?)?' '(?:([+-]?[0-9]+)(?!:) ?)?' '(?:([+-])?([0-9]+):([0-9]+):([0-9]+)(?:\\.([0-9]+))?)?') _re_interval_postgres = regex( '(?:([+-]?[0-9]+) ?years? ?)?' '(?:([+-]?[0-9]+) ?mons? ?)?' '(?:([+-]?[0-9]+) ?days? ?)?' '(?:([+-])?([0-9]+):([0-9]+):([0-9]+)(?:\\.([0-9]+))?)?') _re_interval_postgres_verbose = regex( '@ ?(?:([+-]?[0-9]+) ?years? ?)?' '(?:([+-]?[0-9]+) ?mons? ?)?' '(?:([+-]?[0-9]+) ?days? ?)?' '(?:([+-]?[0-9]+) ?hours? ?)?' '(?:([+-]?[0-9]+) ?mins? ?)?' '(?:([+-])?([0-9]+)(?:\\.([0-9]+))? ?secs?)? ?(ago)?') _re_interval_iso_8601 = regex( 'P(?:([+-]?[0-9]+)Y)?' '(?:([+-]?[0-9]+)M)?' '(?:([+-]?[0-9]+)D)?' '(?:T(?:([+-]?[0-9]+)H)?' '(?:([+-]?[0-9]+)M)?' '(?:([+-])?([0-9]+)(?:\\.([0-9]+))?S)?)?') def cast_interval(value: str) -> timedelta: """Cast an interval value.""" # The output format depends on the server setting IntervalStyle, but it's # not necessary to consult this setting to parse it. It's faster to just # check all possible formats, and there is no ambiguity here. m = _re_interval_iso_8601.match(value) if m: s = [v or '0' for v in m.groups()] secs_ago = s.pop(5) == '-' d = [int(v) for v in s] years, mons, days, hours, mins, secs, usecs = d if secs_ago: secs = -secs usecs = -usecs else: m = _re_interval_postgres_verbose.match(value) if m: s, ago = [v or '0' for v in m.groups()[:8]], m.group(9) secs_ago = s.pop(5) == '-' d = [-int(v) for v in s] if ago else [int(v) for v in s] years, mons, days, hours, mins, secs, usecs = d if secs_ago: secs = - secs usecs = -usecs else: m = _re_interval_postgres.match(value) if m and any(m.groups()): s = [v or '0' for v in m.groups()] hours_ago = s.pop(3) == '-' d = [int(v) for v in s] years, mons, days, hours, mins, secs, usecs = d if hours_ago: hours = -hours mins = -mins secs = -secs usecs = -usecs else: m = _re_interval_sql_standard.match(value) if m and any(m.groups()): s = [v or '0' for v in m.groups()] years_ago = s.pop(0) == '-' hours_ago = s.pop(3) == '-' d = [int(v) for v in s] years, mons, days, hours, mins, secs, usecs = d if years_ago: years = -years mons = -mons if hours_ago: hours = -hours mins = -mins secs = -secs usecs = -usecs else: raise ValueError(f'Cannot parse interval: {value}') days += 365 * years + 30 * mons return timedelta(days=days, hours=hours, minutes=mins, seconds=secs, microseconds=usecs) class Typecasts(dict): """Dictionary mapping database types to typecast functions. The cast functions get passed the string representation of a value in the database which they need to convert to a Python object. The passed string will never be None since NULL values are already handled before the cast function is called. """ # the default cast functions # (str functions are ignored but have been added for faster access) defaults: ClassVar[dict[str, Callable]] = { 'char': str, 'bpchar': str, 'name': str, 'text': str, 'varchar': str, 'sql_identifier': str, 'bool': cast_bool, 'bytea': unescape_bytea, 'int2': int, 'int4': int, 'serial': int, 'int8': int, 'oid': int, 'hstore': cast_hstore, 'json': jsondecode, 'jsonb': jsondecode, 'float4': float, 'float8': float, 'numeric': Decimal, 'money': cast_money, 'date': cast_date, 'interval': cast_interval, 'time': cast_time, 'timetz': cast_timetz, 'timestamp': cast_timestamp, 'timestamptz': cast_timestamptz, 'int2vector': cast_int2vector, 'uuid': Uuid, 'anyarray': cast_array, 'record': cast_record} cnx: Cnx | None = None # for local connection specific instances def __missing__(self, typ: str) -> Callable | None: """Create a cast function if it is not cached. 
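        For example (illustrative), the first lookup of an array type
        builds and caches a cast based on the cast of its base type:

            casts = Typecasts()
            int4_array_cast = casts['_int4']  # created and cached on access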
Note that this class never raises a KeyError, but returns None when no special cast function exists. """ if not isinstance(typ, str): raise TypeError(f'Invalid type: {typ}') cast = self.defaults.get(typ) if cast: # store default for faster access cast = self._add_connection(cast) self[typ] = cast elif typ.startswith('_'): # create array cast base_cast = self[typ[1:]] cast = self.create_array_cast(base_cast) if base_cast: # store only if base type exists self[typ] = cast return cast @staticmethod def _needs_connection(func: Callable) -> bool: """Check if a typecast function needs a connection argument.""" try: args = get_args(func) except (TypeError, ValueError): return False return 'cnx' in args[1:] def _add_connection(self, cast: Callable) -> Callable: """Add a connection argument to the typecast function if necessary.""" if not self.cnx or not self._needs_connection(cast): return cast return partial(cast, cnx=self.cnx) def get(self, typ: str, default: Callable | None = None # type: ignore ) -> Callable | None: """Get the typecast function for the given database type.""" return self[typ] or default def set(self, typ: str | Sequence[str], cast: Callable | None) -> None: """Set a typecast function for the specified database type(s).""" if isinstance(typ, str): typ = [typ] if cast is None: for t in typ: self.pop(t, None) self.pop(f'_{t}', None) else: if not callable(cast): raise TypeError("Cast parameter must be callable") for t in typ: self[t] = self._add_connection(cast) self.pop(f'_{t}', None) def reset(self, typ: str | Sequence[str] | None = None) -> None: """Reset the typecasts for the specified type(s) to their defaults. When no type is specified, all typecasts will be reset. """ defaults = self.defaults if typ is None: self.clear() self.update(defaults) else: if isinstance(typ, str): typ = [typ] for t in typ: cast = defaults.get(t) if cast: self[t] = self._add_connection(cast) t = f'_{t}' cast = defaults.get(t) if cast: self[t] = self._add_connection(cast) else: self.pop(t, None) else: self.pop(t, None) self.pop(f'_{t}', None) def create_array_cast(self, basecast: Callable) -> Callable: """Create an array typecast for the given base cast.""" cast_array = self['anyarray'] def cast(v: Any) -> list: return cast_array(v, basecast) return cast def create_record_cast(self, name: str, fields: Sequence[str], casts: Sequence[str]) -> Callable: """Create a named record typecast for the given fields and casts.""" cast_record = self['record'] record = namedtuple(name, fields) # type: ignore def cast(v: Any) -> record: # noinspection PyArgumentList return record(*cast_record(v, casts)) return cast _typecasts = Typecasts() # this is the global typecast dictionary def get_typecast(typ: str) -> Callable | None: """Get the global typecast function for the given database type.""" return _typecasts.get(typ) def set_typecast(typ: str | Sequence[str], cast: Callable | None) -> None: """Set a global typecast function for the given database type(s). Note that connections cache cast functions. To be sure a global change is picked up by a running connection, call con.type_cache.reset_typecast(). """ _typecasts.set(typ, cast) def reset_typecast(typ: str | Sequence[str] | None = None) -> None: """Reset the global typecasts for the given type(s) to their default. When no type is specified, all typecasts will be reset. Note that connections cache cast functions. To be sure a global change is picked up by a running connection, call con.type_cache.reset_typecast(). 
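    Example (an illustrative sketch):

        set_typecast('numeric', float)  # use float for numeric values
        reset_typecast('numeric')       # restore the default Decimal cast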
""" _typecasts.reset(typ) class LocalTypecasts(Typecasts): """Map typecasts, including local composite types, to cast functions.""" defaults = _typecasts cnx: Cnx | None = None # set in connection specific instances def __missing__(self, typ: str) -> Callable | None: """Create a cast function if it is not cached.""" cast: Callable | None if typ.startswith('_'): base_cast = self[typ[1:]] cast = self.create_array_cast(base_cast) if base_cast: self[typ] = cast else: cast = self.defaults.get(typ) if cast: cast = self._add_connection(cast) self[typ] = cast else: fields = self.get_fields(typ) if fields: casts = [self[field.type] for field in fields] field_names = [field.name for field in fields] cast = self.create_record_cast(typ, field_names, casts) self[typ] = cast return cast # noinspection PyMethodMayBeStatic,PyUnusedLocal def get_fields(self, typ: str) -> list[FieldInfo]: """Return the fields for the given record type. This method will be replaced with a method that looks up the fields using the type cache of the connection. """ return [] FieldInfo = namedtuple('FieldInfo', ('name', 'type')) class TypeCache(dict): """Cache for database types. This cache maps type OIDs and names to TypeCode strings containing important information on the associated database type. """ def __init__(self, cnx: Cnx) -> None: """Initialize type cache for connection.""" super().__init__() self._escape_string = cnx.escape_string self._src = cnx.source() self._typecasts = LocalTypecasts() self._typecasts.get_fields = self.get_fields # type: ignore self._typecasts.cnx = cnx self._query_pg_type = ( "SELECT oid, typname," " typlen, typtype, typcategory, typdelim, typrelid" " FROM pg_catalog.pg_type WHERE oid OPERATOR(pg_catalog.=) {}") def __missing__(self, key: int | str) -> TypeCode: """Get the type info from the database if it is not cached.""" oid: int | str if isinstance(key, int): oid = key else: if '.' 
not in key and '"' not in key: key = f'"{key}"' oid = f"'{self._escape_string(key)}'::pg_catalog.regtype" try: self._src.execute(self._query_pg_type.format(oid)) except ProgrammingError: res = None else: res = self._src.fetch(1) if not res: raise KeyError(f'Type {key} could not be found') r = res[0] type_code = TypeCode.create( int(r[0]), r[1], int(r[2]), r[3], r[4], r[5], int(r[6])) # noinspection PyUnresolvedReferences self[type_code.oid] = self[str(type_code)] = type_code return type_code def get(self, key: int | str, # type: ignore default: TypeCode | None = None) -> TypeCode | None: """Get the type even if it is not cached.""" try: return self[key] except KeyError: return default def get_fields(self, typ: int | str | TypeCode) -> list[FieldInfo] | None: """Get the names and types of the fields of composite types.""" if isinstance(typ, TypeCode): relid = typ.relid else: type_code = self.get(typ) if not type_code: return None relid = type_code.relid if not relid: return None # this type is not composite self._src.execute( "SELECT attname, atttypid" # noqa: S608 " FROM pg_catalog.pg_attribute" f" WHERE attrelid OPERATOR(pg_catalog.=) {relid}" " AND attnum OPERATOR(pg_catalog.>) 0" " AND NOT attisdropped ORDER BY attnum") return [FieldInfo(name, self.get(int(oid))) for name, oid in self._src.fetch(-1)] def get_typecast(self, typ: str) -> Callable | None: """Get the typecast function for the given database type.""" return self._typecasts[typ] def set_typecast(self, typ: str | Sequence[str], cast: Callable | None) -> None: """Set a typecast function for the specified database type(s).""" self._typecasts.set(typ, cast) def reset_typecast(self, typ: str | Sequence[str] | None = None) -> None: """Reset the typecast function for the specified database type(s).""" self._typecasts.reset(typ) def typecast(self, value: Any, typ: str) -> Any: """Cast the given value according to the given database type.""" if value is None: # for NULL values, no typecast is necessary return None cast = self._typecasts[typ] if cast is None or cast is str: # no typecast is necessary return value return cast(value) def get_row_caster(self, types: Sequence[str]) -> Callable: """Get a typecast function for a complete row of values.""" typecasts = self._typecasts casts = [typecasts[typ] for typ in types] casts = [cast if cast is not str else None for cast in casts] def row_caster(row: Sequence) -> Sequence: return [value if cast is None or value is None else cast(value) for cast, value in zip(casts, row)] return row_casterPyGreSQL-PyGreSQL-166b135/pgdb/connect.py000066400000000000000000000041341450706350600177240ustar00rootroot00000000000000"""The DB API 2 connect function.""" from __future__ import annotations from typing import Any from pg.core import connect as get_cnx from .connection import Connection __all__ = ['connect'] def connect(dsn: str | None = None, user: str | None = None, password: str | None = None, host: str | None = None, database: str | None = None, **kwargs: Any) -> Connection: """Connect to a database.""" # first get params from DSN dbport = -1 dbhost: str | None = "" dbname: str | None = "" dbuser: str | None = "" dbpasswd: str | None = "" dbopt: str | None = "" if dsn: try: params = dsn.split(":", 4) dbhost = params[0] dbname = params[1] dbuser = params[2] dbpasswd = params[3] dbopt = params[4] except (AttributeError, IndexError, TypeError): pass # override if necessary if user is not None: dbuser = user if password is not None: dbpasswd = password if database is not None: dbname = database if 
host: try: params = host.split(":", 1) dbhost = params[0] dbport = int(params[1]) except (AttributeError, IndexError, TypeError, ValueError): pass # empty host is localhost if dbhost == "": dbhost = None if dbuser == "": dbuser = None # pass keyword arguments as connection info string if kwargs: kwarg_list = list(kwargs.items()) kw_parts = [] if dbname and '=' in dbname: kw_parts.append(dbname) else: kwarg_list.insert(0, ('dbname', dbname)) for kw, value in kwarg_list: value = str(value) if not value or ' ' in value: value = value.replace('\\', '\\\\').replace("'", "\\'") value = f"'{value}'" kw_parts.append(f'{kw}={value}') dbname = ' '.join(kw_parts) # open the connection cnx = get_cnx(dbname, dbhost, dbport, dbopt, dbuser, dbpasswd) return Connection(cnx) PyGreSQL-PyGreSQL-166b135/pgdb/connection.py000066400000000000000000000117251450706350600204360ustar00rootroot00000000000000"""The DB API 2 Connection objects.""" from __future__ import annotations from contextlib import suppress from typing import Any, Sequence from pg.core import Connection as Cnx from pg.core import ( DatabaseError, DataError, Error, IntegrityError, InterfaceError, InternalError, NotSupportedError, OperationalError, ProgrammingError, Warning, ) from pg.error import op_error from .cast import TypeCache from .constants import shortcutmethods from .cursor import Cursor __all__ = ['Connection'] class Connection: """Connection object.""" # expose the exceptions as attributes on the connection object Error = Error Warning = Warning InterfaceError = InterfaceError DatabaseError = DatabaseError InternalError = InternalError OperationalError = OperationalError ProgrammingError = ProgrammingError IntegrityError = IntegrityError DataError = DataError NotSupportedError = NotSupportedError def __init__(self, cnx: Cnx) -> None: """Create a database connection object.""" self._cnx: Cnx | None = cnx # connection self._tnx = False # transaction state self.type_cache = TypeCache(cnx) self.cursor_type = Cursor self.autocommit = False try: self._cnx.source() except Exception as e: raise op_error("Invalid connection") from e def __enter__(self) -> Connection: """Enter the runtime context for the connection object. The runtime context can be used for running transactions. This also starts a transaction in autocommit mode. """ if self.autocommit: cnx = self._cnx if not cnx: raise op_error("Connection has been closed") try: cnx.source().execute("BEGIN") except DatabaseError: raise # database provides error message except Exception as e: raise op_error("Can't start transaction") from e else: self._tnx = True return self def __exit__(self, et: type[BaseException] | None, ev: BaseException | None, tb: Any) -> None: """Exit the runtime context for the connection object. This does not close the connection, but it ends a transaction. 
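        For example (illustrative; con is an open connection and the
        table name is assumed):

            with con:
                cur = con.cursor()
                cur.execute("insert into t values (%s)", (1,))
            # committed here; an exception inside the block rolls back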
""" if et is None and ev is None and tb is None: self.commit() else: self.rollback() def close(self) -> None: """Close the connection object.""" if not self._cnx: raise op_error("Connection has been closed") if self._tnx: with suppress(DatabaseError): self.rollback() self._cnx.close() self._cnx = None @property def closed(self) -> bool: """Check whether the connection has been closed or is broken.""" try: return not self._cnx or self._cnx.status != 1 except TypeError: return True def commit(self) -> None: """Commit any pending transaction to the database.""" if not self._cnx: raise op_error("Connection has been closed") if self._tnx: self._tnx = False try: self._cnx.source().execute("COMMIT") except DatabaseError: raise # database provides error message except Exception as e: raise op_error("Can't commit transaction") from e def rollback(self) -> None: """Roll back to the start of any pending transaction.""" if not self._cnx: raise op_error("Connection has been closed") if self._tnx: self._tnx = False try: self._cnx.source().execute("ROLLBACK") except DatabaseError: raise # database provides error message except Exception as e: raise op_error("Can't rollback transaction") from e def cursor(self) -> Cursor: """Return a new cursor object using the connection.""" if not self._cnx: raise op_error("Connection has been closed") try: return self.cursor_type(self) except Exception as e: raise op_error("Invalid connection") from e if shortcutmethods: # otherwise do not implement and document this def execute(self, operation: str, parameters: Sequence | None = None) -> Cursor: """Shortcut method to run an operation on an implicit cursor.""" cursor = self.cursor() cursor.execute(operation, parameters) return cursor def executemany(self, operation: str, seq_of_parameters: Sequence[Sequence | None] ) -> Cursor: """Shortcut method to run an operation against a sequence.""" cursor = self.cursor() cursor.executemany(operation, seq_of_parameters) return cursorPyGreSQL-PyGreSQL-166b135/pgdb/constants.py000066400000000000000000000005411450706350600203050ustar00rootroot00000000000000"""The DB API 2 module constants.""" # compliant with DB API 2.0 apilevel = '2.0' # module may be shared, but not connections threadsafety = 1 # this module use extended python format codes paramstyle = 'pyformat' # shortcut methods have been excluded from DB API 2 and # are not recommended by the DB SIG, but they can be handy shortcutmethods = 1 PyGreSQL-PyGreSQL-166b135/pgdb/cursor.py000066400000000000000000000621401450706350600176110ustar00rootroot00000000000000"""The DB API 2 Cursor object.""" from __future__ import annotations from collections import namedtuple from collections.abc import Iterable from datetime import date, datetime, time, timedelta from decimal import Decimal from math import isinf, isnan from typing import TYPE_CHECKING, Any, Callable, Generator, Mapping, Sequence from uuid import UUID as Uuid # noqa: N811 from pg.core import ( RESULT_DQL, DatabaseError, Error, InterfaceError, NotSupportedError, ) from pg.core import Connection as Cnx from pg.error import db_error, if_error, op_error from pg.helpers import QuoteDict, RowCache from .adapt import Binary, Hstore, Json, Literal from .cast import TypeCache from .typecode import TypeCode if TYPE_CHECKING: from .connection import Connection __all__ = ['Cursor', 'CursorDescription'] class Cursor: """Cursor object.""" def __init__(self, connection: Connection) -> None: """Create a cursor object for the database connection.""" self.connection = self._connection = 
connection cnx = connection._cnx if not cnx: raise op_error("Connection has been closed") self._cnx: Cnx = cnx self.type_cache: TypeCache = connection.type_cache self._src = self._cnx.source() # the official attribute for describing the result columns self._description: list[CursorDescription] | bool | None = None if self.row_factory is Cursor.row_factory: # the row factory needs to be determined dynamically self.row_factory = None # type: ignore else: self.build_row_factory = None # type: ignore self.rowcount: int | None = -1 self.arraysize: int = 1 self.lastrowid: int | None = None def __iter__(self) -> Cursor: """Make cursor compatible to the iteration protocol.""" return self def __enter__(self) -> Cursor: """Enter the runtime context for the cursor object.""" return self def __exit__(self, et: type[BaseException] | None, ev: BaseException | None, tb: Any) -> None: """Exit the runtime context for the cursor object.""" self.close() def _quote(self, value: Any) -> Any: """Quote value depending on its type.""" if value is None: return 'NULL' if isinstance(value, (Hstore, Json)): value = str(value) if isinstance(value, (bytes, str)): cnx = self._cnx if isinstance(value, Binary): value = cnx.escape_bytea(value).decode('ascii') else: value = cnx.escape_string(value) return f"'{value}'" if isinstance(value, float): if isinf(value): return "'-Infinity'" if value < 0 else "'Infinity'" if isnan(value): return "'NaN'" return value if isinstance(value, (int, Decimal, Literal)): return value if isinstance(value, datetime): if value.tzinfo: return f"'{value}'::timestamptz" return f"'{value}'::timestamp" if isinstance(value, date): return f"'{value}'::date" if isinstance(value, time): if value.tzinfo: return f"'{value}'::timetz" return f"'{value}'::time" if isinstance(value, timedelta): return f"'{value}'::interval" if isinstance(value, Uuid): return f"'{value}'::uuid" if isinstance(value, list): # Quote value as an ARRAY constructor. This is better than using # an array literal because it carries the information that this is # an array and not a string. One issue with this syntax is that # you need to add an explicit typecast when passing empty arrays. # The ARRAY keyword is actually only necessary at the top level. if not value: # exception for empty array return "'{}'" q = self._quote v = ','.join(str(q(v)) for v in value) return f'ARRAY[{v}]' if isinstance(value, tuple): # Quote as a ROW constructor. This is better than using a record # literal because it carries the information that this is a record # and not a string. We don't use the keyword ROW in order to make # this usable with the IN syntax as well. It is only necessary # when the records has a single column which is not really useful. q = self._quote v = ','.join(str(q(v)) for v in value) return f'({v})' try: # noinspection PyUnresolvedReferences value = value.__pg_repr__() except AttributeError as e: raise InterfaceError( f'Do not know how to adapt type {type(value)}') from e if isinstance(value, (tuple, list)): value = self._quote(value) return value def _quoteparams(self, string: str, parameters: Mapping | Sequence | None) -> str: """Quote parameters. This function works for both mappings and sequences. The function should be used even when there are no parameters, so that we have a consistent behavior regarding percent signs. 
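        Examples (illustrative):

            cur._quoteparams('select %s, %s', ('a', 1))
            # -> "select 'a', 1"
            cur._quoteparams('select 100%%', None)
            # -> 'select 100%'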
""" if not parameters: try: return string % () # unescape literal quotes if possible except (TypeError, ValueError): return string # silently accept unescaped quotes if isinstance(parameters, dict): parameters = QuoteDict(parameters) parameters.quote = self._quote else: parameters = tuple(map(self._quote, parameters)) return string % parameters def _make_description(self, info: tuple[int, str, int, int, int] ) -> CursorDescription: """Make the description tuple for the given field info.""" name, typ, size, mod = info[1:] type_code = self.type_cache[typ] if mod > 0: mod -= 4 precision: int | None scale: int | None if type_code == 'numeric': precision, scale = mod >> 16, mod & 0xffff size = precision else: if not size: size = type_code.size if size == -1: size = mod precision = scale = None return CursorDescription( name, type_code, None, size, precision, scale, None) @property def description(self) -> list[CursorDescription] | None: """Read-only attribute describing the result columns.""" description = self._description if description is None: return None if not isinstance(description, list): make = self._make_description description = [make(info) for info in self._src.listinfo()] self._description = description return description @property def colnames(self) -> Sequence[str] | None: """Unofficial convenience method for getting the column names.""" description = self.description return None if description is None else [d[0] for d in description] @property def coltypes(self) -> Sequence[TypeCode] | None: """Unofficial convenience method for getting the column types.""" description = self.description return None if description is None else [d[1] for d in description] def close(self) -> None: """Close the cursor object.""" self._src.close() def execute(self, operation: str, parameters: Sequence | None = None ) -> Cursor: """Prepare and execute a database operation (query or command).""" # The parameters may also be specified as list of tuples to e.g. # insert multiple rows in a single operation, but this kind of # usage is deprecated. We make several plausibility checks because # tuples can also be passed with the meaning of ROW constructors. 
if (parameters and isinstance(parameters, list) and len(parameters) > 1 and all(isinstance(p, tuple) for p in parameters) and all(len(p) == len(parameters[0]) for p in parameters[1:])): return self.executemany(operation, parameters) # not a list of tuples return self.executemany(operation, [parameters]) def executemany(self, operation: str, seq_of_parameters: Sequence[Sequence | None]) -> Cursor: """Prepare operation and execute it against a parameter sequence.""" if not seq_of_parameters: # don't do anything without parameters return self self._description = None self.rowcount = -1 # first try to execute all queries rowcount = 0 sql = "BEGIN" try: if not self._connection._tnx and not self._connection.autocommit: try: self._src.execute(sql) except DatabaseError: raise # database provides error message except Exception as e: raise op_error("Can't start transaction") from e else: self._connection._tnx = True for parameters in seq_of_parameters: sql = operation sql = self._quoteparams(sql, parameters) rows = self._src.execute(sql) if rows: # true if not DML rowcount += rows else: self.rowcount = -1 except DatabaseError: raise # database provides error message except Error as err: # noinspection PyTypeChecker raise if_error(f"Error in '{sql}': '{err}'") from err except Exception as err: raise op_error(f"Internal error in '{sql}': {err}") from err # then initialize result raw count and description if self._src.resulttype == RESULT_DQL: self._description = True # fetch on demand self.rowcount = self._src.ntuples self.lastrowid = None build_row_factory = self.build_row_factory if build_row_factory: # type: ignore self.row_factory = build_row_factory() # type: ignore else: self.rowcount = rowcount self.lastrowid = self._src.oidstatus() # return the cursor object, so you can write statements such as # "cursor.execute(...).fetchall()" or "for row in cursor.execute(...)" return self def fetchone(self) -> Sequence | None: """Fetch the next row of a query result set.""" res = self.fetchmany(1, False) try: return res[0] except IndexError: return None def fetchall(self) -> Sequence[Sequence]: """Fetch all (remaining) rows of a query result.""" return self.fetchmany(-1, False) def fetchmany(self, size: int | None = None, keep: bool = False ) -> Sequence[Sequence]: """Fetch the next set of rows of a query result. The number of rows to fetch per call is specified by the size parameter. If it is not given, the cursor's arraysize determines the number of rows to be fetched. If you set the keep parameter to true, this is kept as new arraysize. """ if size is None: size = self.arraysize if keep: self.arraysize = size try: result = self._src.fetch(size) except DatabaseError: raise except Error as err: raise db_error(str(err)) from err row_factory = self.row_factory coltypes = self.coltypes if coltypes is None: # cannot determine column types, return raw result return [row_factory(row) for row in result] if len(result) > 5: # optimize the case where we really fetch many values # by looking up all type casting functions upfront cast_row = self.type_cache.get_row_caster(coltypes) return [row_factory(cast_row(row)) for row in result] cast_value = self.type_cache.typecast return [row_factory([cast_value(value, typ) for typ, value in zip(coltypes, row)]) for row in result] def callproc(self, procname: str, parameters: Sequence | None = None ) -> Sequence | None: """Call a stored database procedure with the given name. The sequence of parameters must contain one entry for each input argument that the procedure expects. 
The result of the call is the same as this input sequence; replacement of output and input/output parameters in the return value is currently not supported. The procedure may also provide a result set as output. These can be requested through the standard fetch methods of the cursor. """ n = len(parameters) if parameters else 0 s = ','.join(n * ['%s']) query = f'select * from "{procname}"({s})' # noqa: S608 self.execute(query, parameters) return parameters # noinspection PyShadowingBuiltins def copy_from(self, stream: Any, table: str, format: str | None = None, sep: str | None = None, null: str | None = None, size: int | None = None, columns: Sequence[str] | None = None) -> Cursor: """Copy data from an input stream to the specified table. The input stream can be a file-like object with a read() method or it can also be an iterable returning a row or multiple rows of input on each iteration. The format must be 'text', 'csv' or 'binary'. The sep option sets the column separator (delimiter) used in the non binary formats. The null option sets the textual representation of NULL in the input. The size option sets the size of the buffer used when reading data from file-like objects. The copy operation can be restricted to a subset of columns. If no columns are specified, all of them will be copied. """ binary_format = format == 'binary' try: read = stream.read except AttributeError as e: if size: raise ValueError( "Size must only be set for file-like objects") from e input_type: type | tuple[type, ...] type_name: str if binary_format: input_type = bytes type_name = 'byte strings' else: input_type = (bytes, str) type_name = 'strings' if isinstance(stream, (bytes, str)): if not isinstance(stream, input_type): raise ValueError(f"The input must be {type_name}") from e if not binary_format: if isinstance(stream, str): if not stream.endswith('\n'): stream += '\n' else: if not stream.endswith(b'\n'): stream += b'\n' def chunks() -> Generator: yield stream elif isinstance(stream, Iterable): def chunks() -> Generator: for chunk in stream: if not isinstance(chunk, input_type): raise ValueError( f"Input stream must consist of {type_name}") if isinstance(chunk, str): if not chunk.endswith('\n'): chunk += '\n' else: if not chunk.endswith(b'\n'): chunk += b'\n' yield chunk else: raise TypeError("Need an input stream to copy from") from e else: if size is None: size = 8192 elif not isinstance(size, int): raise TypeError("The size option must be an integer") if size > 0: def chunks() -> Generator: while True: buffer = read(size) yield buffer if not buffer or len(buffer) < size: break else: def chunks() -> Generator: yield read() if not table or not isinstance(table, str): raise TypeError("Need a table to copy to") if table.lower().startswith('select '): raise ValueError("Must specify a table, not a query") cnx = self._cnx table = '.'.join(map(cnx.escape_identifier, table.split('.', 1))) operation_parts = [f'copy {table}'] options = [] parameters = [] if format is not None: if not isinstance(format, str): raise TypeError("The format option must be be a string") if format not in ('text', 'csv', 'binary'): raise ValueError("Invalid format") options.append(f'format {format}') if sep is not None: if not isinstance(sep, str): raise TypeError("The sep option must be a string") if format == 'binary': raise ValueError( "The sep option is not allowed with binary format") if len(sep) != 1: raise ValueError( "The sep option must be a single one-byte character") options.append('delimiter %s') parameters.append(sep) if null 
is not None: if not isinstance(null, str): raise TypeError("The null option must be a string") options.append('null %s') parameters.append(null) if columns: if not isinstance(columns, str): columns = ','.join(map(cnx.escape_identifier, columns)) operation_parts.append(f'({columns})') operation_parts.append("from stdin") if options: operation_parts.append(f"({','.join(options)})") operation = ' '.join(operation_parts) putdata = self._src.putdata self.execute(operation, parameters) try: for chunk in chunks(): putdata(chunk) except BaseException as error: self.rowcount = -1 # the following call will re-raise the error putdata(error) else: rowcount = putdata(None) self.rowcount = -1 if rowcount is None else rowcount # return the cursor object, so you can chain operations return self # noinspection PyShadowingBuiltins def copy_to(self, stream: Any, table: str, format: str | None = None, sep: str | None = None, null: str | None = None, decode: bool | None = None, columns: Sequence[str] | None = None) -> Cursor | Generator: """Copy data from the specified table to an output stream. The output stream can be a file-like object with a write() method or it can also be None, in which case the method will return a generator yielding a row on each iteration. Output will be returned as byte strings unless you set decode to true. Note that you can also use a select query instead of the table name. The format must be 'text', 'csv' or 'binary'. The sep option sets the column separator (delimiter) used in the non binary formats. The null option sets the textual representation of NULL in the output. The copy operation can be restricted to a subset of columns. If no columns are specified, all of them will be copied. """ binary_format = format == 'binary' if stream is None: write = None else: try: write = stream.write except AttributeError as e: raise TypeError("Need an output stream to copy to") from e if not table or not isinstance(table, str): raise TypeError("Need a table to copy to") cnx = self._cnx if table.lower().startswith('select '): if columns: raise ValueError("Columns must be specified in the query") table = f'({table})' else: table = '.'.join(map(cnx.escape_identifier, table.split('.', 1))) operation_parts = [f'copy {table}'] options = [] parameters = [] if format is not None: if not isinstance(format, str): raise TypeError("The format option must be a string") if format not in ('text', 'csv', 'binary'): raise ValueError("Invalid format") options.append(f'format {format}') if sep is not None: if not isinstance(sep, str): raise TypeError("The sep option must be a string") if binary_format: raise ValueError( "The sep option is not allowed with binary format") if len(sep) != 1: raise ValueError( "The sep option must be a single one-byte character") options.append('delimiter %s') parameters.append(sep) if null is not None: if not isinstance(null, str): raise TypeError("The null option must be a string") options.append('null %s') parameters.append(null) if decode is None: decode = format != 'binary' else: if not isinstance(decode, (int, bool)): raise TypeError("The decode option must be a boolean") if decode and binary_format: raise ValueError( "The decode option is not allowed with binary format") if columns: if not isinstance(columns, str): columns = ','.join(map(cnx.escape_identifier, columns)) operation_parts.append(f'({columns})') operation_parts.append("to stdout") if options: operation_parts.append(f"({','.join(options)})") operation = ' '.join(operation_parts) getdata = self._src.getdata 
        self.execute(operation, parameters)

        def copy() -> Generator:
            self.rowcount = 0
            while True:
                row = getdata(decode)
                if isinstance(row, int):
                    if self.rowcount != row:
                        self.rowcount = row
                    break
                self.rowcount += 1
                yield row

        if write is None:
            # no input stream, return the generator
            return copy()

        # write the rows to the file-like input stream
        for row in copy():
            # noinspection PyUnboundLocalVariable
            write(row)

        # return the cursor object, so you can chain operations
        return self

    def __next__(self) -> Sequence:
        """Return the next row (support for the iteration protocol)."""
        res = self.fetchone()
        if res is None:
            raise StopIteration
        return res

    # Note that the iterator protocol now uses __next__() instead of next(),
    # but we keep it for backward compatibility of pgdb.
    next = __next__

    @staticmethod
    def nextset() -> bool | None:
        """Not supported."""
        raise NotSupportedError("The nextset() method is not supported")

    @staticmethod
    def setinputsizes(sizes: Sequence[int]) -> None:
        """Not supported."""
        pass  # unsupported, but silently passed

    @staticmethod
    def setoutputsize(size: int, column: int = 0) -> None:
        """Not supported."""
        pass  # unsupported, but silently passed

    @staticmethod
    def row_factory(row: Sequence) -> Sequence:
        """Process rows before they are returned.

        You can overwrite this statically with a custom row factory, or
        you can build a row factory dynamically with build_row_factory().

        For example, you can create a Cursor class that returns rows as
        Python dictionaries like this:

            class DictCursor(pgdb.Cursor):

                def row_factory(self, row):
                    return {desc[0]: value
                            for desc, value in zip(self.description, row)}

            cur = DictCursor(con)  # get one DictCursor instance or
            con.cursor_type = DictCursor  # always use DictCursor instances
        """
        raise NotImplementedError

    def build_row_factory(self) -> Callable[[Sequence], Sequence] | None:
        """Build a row factory based on the current description.

        This implementation builds a row factory for creating named tuples.
        You can overwrite this method if you want to dynamically create
        different row factories whenever the column description changes.
        """
        names = self.colnames
        return RowCache.row_factory(tuple(names)) if names else None


CursorDescription = namedtuple('CursorDescription', (
    'name', 'type_code', 'display_size', 'internal_size',
    'precision', 'scale', 'null_ok'))
PyGreSQL-PyGreSQL-166b135/pgdb/py.typed
# Marker file for PEP 561. The pgdb package uses inline types.
PyGreSQL-PyGreSQL-166b135/pgdb/typecode.py
"""Support for DB API 2 type codes."""

from __future__ import annotations

__all__ = ['TypeCode']


class TypeCode(str):
    """Class representing the type_code used by the DB-API 2.0.

    TypeCode objects are strings equal to the PostgreSQL type name,
    but carry some additional information.
    """

    oid: int
    len: int
    type: str
    category: str
    delim: str
    relid: int

    # noinspection PyShadowingBuiltins
    @classmethod
    def create(cls, oid: int, name: str, len: int, type: str,
               category: str, delim: str, relid: int) -> TypeCode:
        """Create a type code for a PostgreSQL data type."""
        self = cls(name)
        self.oid = oid
        self.len = len
        self.type = type
        self.category = category
        self.delim = delim
        self.relid = relid
        return self
PyGreSQL-PyGreSQL-166b135/pyproject.toml
[project]
name = "PyGreSQL"
version = "6.0"
requires-python = ">=3.7"
authors = [
    {name = "D'Arcy J. M. Cain", email = "darcy@pygresql.org"},
    {name = "Christoph Zwerschke", email = "cito@online.de"},
]
description = "Python PostgreSQL interfaces"
readme = "README.rst"
keywords = ["pygresql", "postgresql", "database", "api", "dbapi"]
classifiers = [
    "Development Status :: 6 - Mature",
    "Intended Audience :: Developers",
    "License :: OSI Approved :: PostgreSQL License",
    "Operating System :: OS Independent",
    "Programming Language :: C",
    "Programming Language :: Python",
    "Programming Language :: Python :: 3",
    "Programming Language :: Python :: 3.7",
    "Programming Language :: Python :: 3.8",
    "Programming Language :: Python :: 3.9",
    "Programming Language :: Python :: 3.10",
    "Programming Language :: Python :: 3.11",
    "Programming Language :: Python :: 3.12",
    "Programming Language :: SQL",
    "Topic :: Database",
    "Topic :: Database :: Front-Ends",
    "Topic :: Software Development :: Libraries :: Python Modules",
]

[project.license]
file = "LICENSE.txt"

[project.urls]
Homepage = "https://pygresql.github.io/"
Documentation = "https://pygresql.github.io/contents/"
"Source Code" = "https://github.com/PyGreSQL/PyGreSQL"
"Issue Tracker" = "https://github.com/PyGreSQL/PyGreSQL/issues/"
Changelog = "https://pygresql.github.io/contents/changelog.html"
Download = "https://pygresql.github.io/download/"
"Mailing List" = "https://mail.vex.net/mailman/listinfo/pygresql"

[tool.ruff]
target-version = "py37"
line-length = 79
select = [
    "E",    # pycodestyle
    "F",    # pyflakes
    "I",    # isort
    "N",    # pep8-naming
    "UP",   # pyupgrade
    "D",    # pydocstyle
    "B",    # bugbear
    "S",    # bandit
    "SIM",  # simplify
    "RUF",  # ruff
]
exclude = [
    "__pycache__",
    "__pypackages__",
    ".git",
    ".tox",
    ".venv",
    ".devcontainer",
    ".vscode",
    "docs",
    "build",
    "dist",
    "local",
    "venv",
]

[tool.ruff.per-file-ignores]
"tests/*.py" = ["D100", "D101", "D102", "D103", "D105", "D107", "S"]

[tool.mypy]
python_version = "3.11"
check_untyped_defs = true
no_implicit_optional = true
strict_optional = true
warn_redundant_casts = true
warn_unused_ignores = true
disallow_untyped_defs = true

[[tool.mypy.overrides]]
module = [
    "tests.*"
]
disallow_untyped_defs = false

[tool.setuptools]
packages = ["pg", "pgdb"]
license-files = ["LICENSE.txt"]

[tool.setuptools.package-data]
pg = ["py.typed"]
pgdb = ["py.typed"]

[build-system]
requires = ["setuptools>=68", "wheel>=0.41"]
build-backend = "setuptools.build_meta"
PyGreSQL-PyGreSQL-166b135/setup.py
#!/usr/bin/python
"""Driver script for building PyGreSQL using setuptools.
You can build the PyGreSQL distribution like this: pip install build python -m build -C strict -C memory-size """ import os import platform import re import sys import warnings from distutils.ccompiler import get_default_compiler from distutils.sysconfig import get_python_inc, get_python_lib from setuptools import Extension, setup from setuptools.command.build_ext import build_ext def project_version(): """Read the PyGreSQL version from the pyproject.toml file.""" with open('pyproject.toml') as f: for d in f: if d.startswith("version ="): version = d.split("=")[1].strip().strip('"') return version raise Exception("Cannot determine PyGreSQL version") def project_readme(): """Get the content of the README file.""" with open('README.rst') as f: return f.read() version = project_version() if not (3, 7) <= sys.version_info[:2] < (4, 0): raise Exception( f"Sorry, PyGreSQL {version} does not support this Python version") long_description = project_readme() # For historical reasons, PyGreSQL does not install itself as a single # "pygresql" package, but as two top-level modules "pg", providing the # classic interface, and "pgdb" for the modern DB-API 2.0 interface. # These two top-level Python modules share the same C extension "_pg". def pg_config(s): """Retrieve information about installed version of PostgreSQL.""" f = os.popen(f'pg_config --{s}') # noqa: S605 d = f.readline().strip() if f.close() is not None: raise Exception("pg_config tool is not available.") if not d: raise Exception(f"Could not get {s} information.") return d def pg_version(): """Return the PostgreSQL version as a tuple of integers.""" match = re.search(r'(\d+)\.(\d+)', pg_config('version')) if match: return tuple(map(int, match.groups())) return 10, 0 pg_version = pg_version() libraries = ['pq'] # Make sure that the Python header files are searched before # those of PostgreSQL, because PostgreSQL can have its own Python.h include_dirs = [get_python_inc(), pg_config('includedir')] library_dirs = [get_python_lib(), pg_config('libdir')] define_macros = [('PYGRESQL_VERSION', version)] undef_macros = [] extra_compile_args = ['-O2', '-funsigned-char', '-Wall', '-Wconversion'] class build_pg_ext(build_ext): # noqa: N801 """Customized build_ext command for PyGreSQL.""" description = "build the PyGreSQL C extension" user_options = [*build_ext.user_options, # noqa: RUF012 ('strict', None, "count all compiler warnings as errors"), ('memory-size', None, "enable memory size function"), ('no-memory-size', None, "disable memory size function")] boolean_options = [*build_ext.boolean_options, # noqa: RUF012 'strict', 'memory-size'] negative_opt = { # noqa: RUF012 'no-memory-size': 'memory-size'} def get_compiler(self): """Return the C compiler used for building the extension.""" return self.compiler or get_default_compiler() def initialize_options(self): """Initialize the supported options with default values.""" build_ext.initialize_options(self) self.strict = False self.memory_size = None supported = pg_version >= (10, 0) if not supported: warnings.warn( "PyGreSQL does not support the installed PostgreSQL version.", stacklevel=2) def finalize_options(self): """Set final values for all build_pg options.""" build_ext.finalize_options(self) if self.strict: extra_compile_args.append('-Werror') wanted = self.memory_size supported = pg_version >= (12, 0) if (wanted is None and supported) or wanted: define_macros.append(('MEMORY_SIZE', None)) if not supported: warnings.warn( "The installed PostgreSQL version" " does not support the memory size 
function.", stacklevel=2) if sys.platform == 'win32': libraries[0] = 'lib' + libraries[0] if os.path.exists(os.path.join( library_dirs[1], libraries[0] + 'dll.lib')): libraries[0] += 'dll' compiler = self.get_compiler() if compiler == 'mingw32': # MinGW if platform.architecture()[0] == '64bit': # needs MinGW-w64 define_macros.append(('MS_WIN64', None)) elif compiler == 'msvc': # Microsoft Visual C++ extra_compile_args[1:] = [ '-J', '-W3', '-WX', '-Dinline=__inline'] # needed for MSVC 9 setup( name='PyGreSQL', version=version, description='Python PostgreSQL Interfaces', long_description=long_description, long_description_content_type='text/x-rst', keywords='pygresql postgresql database api dbapi', author="D'Arcy J. M. Cain", author_email="darcy@PyGreSQL.org", url='https://pygresql.github.io/', download_url='https://pygresql.github.io/download/', project_urls={ 'Documentation': 'https://pygresql.github.io/contents/', 'Issue Tracker': 'https://github.com/PyGreSQL/PyGreSQL/issues/', 'Mailing List': 'https://mail.vex.net/mailman/listinfo/pygresql', 'Source Code': 'https://github.com/PyGreSQL/PyGreSQL'}, classifiers=[ 'Development Status :: 6 - Mature', 'Intended Audience :: Developers', 'License :: OSI Approved :: PostgreSQL License', 'Operating System :: OS Independent', 'Programming Language :: C', 'Programming Language :: Python', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9', 'Programming Language :: Python :: 3.10', 'Programming Language :: Python :: 3.11', 'Programming Language :: Python :: 3.12', 'Programming Language :: SQL', 'Topic :: Database', 'Topic :: Database :: Front-Ends', 'Topic :: Software Development :: Libraries :: Python Modules'], license='PostgreSQL', test_suite='tests.discover', zip_safe=False, packages=["pg", "pgdb"], package_data={"pg": ["py.typed"], "pgdb": ["py.typed"]}, ext_modules=[Extension( 'pg._pg', ["ext/pgmodule.c"], include_dirs=include_dirs, library_dirs=library_dirs, define_macros=define_macros, undef_macros=undef_macros, libraries=libraries, extra_compile_args=extra_compile_args)], cmdclass=dict(build_ext=build_pg_ext), ) PyGreSQL-PyGreSQL-166b135/tests/000077500000000000000000000000001450706350600161455ustar00rootroot00000000000000PyGreSQL-PyGreSQL-166b135/tests/__init__.py000066400000000000000000000007341450706350600202620ustar00rootroot00000000000000"""PyGreSQL test suite. You can specify your local database settings in LOCAL_PyGreSQL.py. """ import unittest if not (hasattr(unittest, 'skip') and hasattr(unittest.TestCase, 'setUpClass') and hasattr(unittest.TestCase, 'skipTest') and hasattr(unittest.TestCase, 'assertIn')): raise ImportError('Please install a newer version of unittest') def discover(): loader = unittest.TestLoader() suite = loader.discover('.') return suite PyGreSQL-PyGreSQL-166b135/tests/config.py000066400000000000000000000021051450706350600177620ustar00rootroot00000000000000#!/usr/bin/python from os import environ # We need a database to test against. # The connection parameters are taken from the usual PG* environment # variables and can be overridden with PYGRESQL_* environment variables # or values specified in the file .LOCAL_PyGreSQL or LOCAL_PyGreSQL.py. # The tests should be run with various PostgreSQL versions and databases # created with different encodings and locales. Particularly, make sure the # tests are running against databases created with both SQL_ASCII and UTF8. 
# The current user must have create schema privilege on the database. get = environ.get dbname = get('PYGRESQL_DB', get('PGDATABASE', 'test')) dbhost = get('PYGRESQL_HOST', get('PGHOST', 'localhost')) dbport = int(get('PYGRESQL_PORT', get('PGPORT', 5432))) dbuser = get('PYGRESQL_USER', get('PGUSER')) dbpasswd = get('PYGRESQL_PASSWD', get('PGPASSWORD')) try: from .LOCAL_PyGreSQL import * # type: ignore # noqa except (ImportError, ValueError): try: # noqa from LOCAL_PyGreSQL import * # type: ignore # noqa except ImportError: pass PyGreSQL-PyGreSQL-166b135/tests/dbapi20.py000066400000000000000000000760241450706350600177500ustar00rootroot00000000000000#!/usr/bin/python """Python DB API 2.0 driver compliance unit test suite. This software is Public Domain and may be used without restrictions. Some modernization of the code has been done by the PyGreSQL team. """ from __future__ import annotations import time import unittest from contextlib import suppress from typing import Any, ClassVar __version__ = '1.15.0' class DatabaseAPI20Test(unittest.TestCase): """Test a database driver for DB API 2.0 compatibility. This implementation tests Gadfly, but the TestCase is structured so that other drivers can subclass this test case to ensure compliance with the DB-API. It is expected that this TestCase may be expanded in the future if ambiguities or edge conditions are discovered. The 'Optional Extensions' are not yet being tested. Drivers should subclass this test, overriding setUp, tearDown, self.driver, connect_args and connect_kw_args. Class specification should be as follows: import dbapi20 class mytest(dbapi20.DatabaseAPI20Test): [...] Don't 'import DatabaseAPI20Test from dbapi20', or you will confuse the unit tester - just 'import dbapi20'. """ # The self.driver module. This should be the module where the 'connect' # method is to be found driver: Any = None connect_args: tuple = () # List of arguments to pass to connect connect_kw_args: ClassVar[dict[str, Any]] = {} # Keyword arguments table_prefix = 'dbapi20test_' # If you need to specify a prefix for tables ddl1 = f'create table {table_prefix}booze (name varchar(20))' ddl2 = (f'create table {table_prefix}barflys (name varchar(20),' ' drink varchar(30))') xddl1 = f'drop table {table_prefix}booze' xddl2 = f'drop table {table_prefix}barflys' insert = 'insert' lowerfunc = 'lower' # Name of stored procedure to convert str to lowercase # Some drivers may need to override these helpers, for example adding # a 'commit' after the execute. def execute_ddl1(self, cursor): cursor.execute(self.ddl1) def execute_ddl2(self, cursor): cursor.execute(self.ddl2) def setUp(self): """Set up test fixture. Drivers should override this method to perform required setup if any is necessary, such as creating the database. """ pass def tearDown(self): """Tear down test fixture. Drivers should override this method to perform required cleanup if any is necessary, such as deleting the test database. The default drops the tables that may be created. """ try: con = self._connect() try: cur = con.cursor() for ddl in (self.xddl1, self.xddl2): try: cur.execute(ddl) con.commit() except self.driver.Error: # Assume table didn't exist. Other tests will check if # execute is busted.
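                        # e.g. on the very first run, before any test
                        # has created the booze or barflys tables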
pass finally: con.close() except Exception: pass def _connect(self): try: con = self.driver.connect( *self.connect_args, **self.connect_kw_args) except AttributeError: self.fail("No connect method found in self.driver module") if not isinstance(con, self.driver.Connection): self.fail("The connect method does not return a Connection") return con def test_connect(self): con = self._connect() con.close() def test_apilevel(self): try: # Must exist apilevel = self.driver.apilevel # Must equal 2.0 self.assertEqual(apilevel, '2.0') except AttributeError: self.fail("Driver doesn't define apilevel") def test_threadsafety(self): try: # Must exist threadsafety = self.driver.threadsafety # Must be a valid value self.assertIn(threadsafety, (0, 1, 2, 3)) except AttributeError: self.fail("Driver doesn't define threadsafety") def test_paramstyle(self): try: # Must exist paramstyle = self.driver.paramstyle # Must be a valid value self.assertIn(paramstyle, ( 'qmark', 'numeric', 'named', 'format', 'pyformat')) except AttributeError: self.fail("Driver doesn't define paramstyle") def test_exceptions(self): # Make sure required exceptions exist, and are in the # defined hierarchy. sub = issubclass self.assertTrue(sub(self.driver.Warning, Exception)) self.assertTrue(sub(self.driver.Error, Exception)) self.assertTrue(sub(self.driver.InterfaceError, self.driver.Error)) self.assertTrue(sub(self.driver.DatabaseError, self.driver.Error)) self.assertTrue(sub(self.driver.OperationalError, self.driver.Error)) self.assertTrue(sub(self.driver.IntegrityError, self.driver.Error)) self.assertTrue(sub(self.driver.InternalError, self.driver.Error)) self.assertTrue(sub(self.driver.ProgrammingError, self.driver.Error)) self.assertTrue(sub(self.driver.NotSupportedError, self.driver.Error)) def test_exceptions_as_connection_attributes(self): # OPTIONAL EXTENSION # Test for the optional DB API 2.0 extension, where the exceptions # are exposed as attributes on the Connection object # I figure this optional extension will be implemented by any # driver author who is using this test suite, so it is enabled # by default. 
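        # A sketch of the pattern this extension enables (names assumed):
        # code that receives only a connection object can catch driver
        # exceptions without importing the driver module itself:
        #
        #     def run(con, sql):
        #         try:
        #             con.cursor().execute(sql)
        #         except con.DatabaseError:
        #             con.rollback()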
con = self._connect() drv = self.driver self.assertIs(con.Warning, drv.Warning) self.assertIs(con.Error, drv.Error) self.assertIs(con.InterfaceError, drv.InterfaceError) self.assertIs(con.DatabaseError, drv.DatabaseError) self.assertIs(con.OperationalError, drv.OperationalError) self.assertIs(con.IntegrityError, drv.IntegrityError) self.assertIs(con.InternalError, drv.InternalError) self.assertIs(con.ProgrammingError, drv.ProgrammingError) self.assertIs(con.NotSupportedError, drv.NotSupportedError) def test_commit(self): con = self._connect() try: # Commit must work, even if it doesn't do anything con.commit() finally: con.close() def test_rollback(self): con = self._connect() # If rollback is defined, it should either work or throw # the documented exception if hasattr(con, 'rollback'): with suppress(self.driver.NotSupportedError): # noinspection PyCallingNonCallable con.rollback() def test_cursor(self): con = self._connect() try: cur = con.cursor() self.assertIsNotNone(cur) finally: con.close() def test_cursor_isolation(self): con = self._connect() try: # Make sure cursors created from the same connection have # the documented transaction isolation level cur1 = con.cursor() cur2 = con.cursor() self.execute_ddl1(cur1) cur1.execute(f"{self.insert} into {self.table_prefix}booze" " values ('Victoria Bitter')") cur2.execute(f"select name from {self.table_prefix}booze") booze = cur2.fetchall() self.assertEqual(len(booze), 1) self.assertEqual(len(booze[0]), 1) self.assertEqual(booze[0][0], 'Victoria Bitter') finally: con.close() def test_description(self): con = self._connect() try: cur = con.cursor() self.execute_ddl1(cur) self.assertIsNone( cur.description, 'cursor.description should be none after executing a' ' statement that can return no rows (such as DDL)') cur.execute(f'select name from {self.table_prefix}booze') self.assertEqual( len(cur.description), 1, 'cursor.description describes too many columns') self.assertEqual( len(cur.description[0]), 7, 'cursor.description[x] tuples must have 7 elements') self.assertEqual( cur.description[0][0].lower(), 'name', 'cursor.description[x][0] must return column name') self.assertEqual( cur.description[0][1], self.driver.STRING, 'cursor.description[x][1] must return column type.' f' Got: {cur.description[0][1]!r}') # Make sure self.description gets reset self.execute_ddl2(cur) self.assertIsNone( cur.description, 'cursor.description not being set to None when executing' ' no-result statements (eg. 
DDL)') finally: con.close() def test_rowcount(self): con = self._connect() try: cur = con.cursor() self.execute_ddl1(cur) self.assertIn( cur.rowcount, (-1, 0), # Bug #543885 'cursor.rowcount should be -1 or 0 after executing no-result' ' statements') cur.execute(f"{self.insert} into {self.table_prefix}booze" " values ('Victoria Bitter')") self.assertIn( cur.rowcount, (-1, 1), 'cursor.rowcount should == number of rows inserted, or' ' set to -1 after executing an insert statement') cur.execute(f"select name from {self.table_prefix}booze") self.assertIn( cur.rowcount, (-1, 1), 'cursor.rowcount should == number of rows returned, or' ' set to -1 after executing a select statement') self.execute_ddl2(cur) self.assertIn( cur.rowcount, (-1, 0), # Bug #543885 'cursor.rowcount should be -1 or 0 after executing no-result' ' statements') finally: con.close() lower_func = 'lower' def test_callproc(self): con = self._connect() try: cur = con.cursor() if self.lower_func and hasattr(cur, 'callproc'): # noinspection PyCallingNonCallable r = cur.callproc(self.lower_func, ('FOO',)) self.assertEqual(len(r), 1) self.assertEqual(r[0], 'FOO') r = cur.fetchall() self.assertEqual(len(r), 1, 'callproc produced no result set') self.assertEqual( len(r[0]), 1, 'callproc produced invalid result set') self.assertEqual( r[0][0], 'foo', 'callproc produced invalid results') finally: con.close() def test_close(self): con = self._connect() try: cur = con.cursor() finally: con.close() # cursor.execute should raise an Error if called after connection # closed self.assertRaises(self.driver.Error, self.execute_ddl1, cur) # connection.commit should raise an Error if called after connection # closed self.assertRaises(self.driver.Error, con.commit) def test_non_idempotent_close(self): con = self._connect() con.close() # connection.close should raise an Error if called more than once # (the usefulness of this test and this feature is questionable) self.assertRaises(self.driver.Error, con.close) def test_execute(self): con = self._connect() try: cur = con.cursor() self._paraminsert(cur) finally: con.close() def _paraminsert(self, cur): self.execute_ddl2(cur) table_prefix = self.table_prefix insert = f"{self.insert} into {table_prefix}barflys values" cur.execute( f"{insert} ('Victoria Bitter'," " 'thi%s :may ca%(u)se? troub:1e')") self.assertIn(cur.rowcount, (-1, 1)) if self.driver.paramstyle == 'qmark': cur.execute( f"{insert} (?, 'thi%s :may ca%(u)se? troub:1e')", ("Cooper's",)) elif self.driver.paramstyle == 'numeric': cur.execute( f"{insert} (:1, 'thi%s :may ca%(u)se? troub:1e')", ("Cooper's",)) elif self.driver.paramstyle == 'named': cur.execute( f"{insert} (:beer, 'thi%s :may ca%(u)se? troub:1e')", {'beer': "Cooper's"}) elif self.driver.paramstyle == 'format': cur.execute( f"{insert} (%s, 'thi%%s :may ca%%(u)se? troub:1e')", ("Cooper's",)) elif self.driver.paramstyle == 'pyformat': cur.execute( f"{insert} (%(beer)s, 'thi%%s :may ca%%(u)se? troub:1e')", {'beer': "Cooper's"}) else: self.fail('Invalid paramstyle') self.assertIn(cur.rowcount, (-1, 1)) cur.execute(f'select name, drink from {table_prefix}barflys') res = cur.fetchall() self.assertEqual(len(res), 2, 'cursor.fetchall returned too few rows') beers = [res[0][0], res[1][0]] beers.sort() self.assertEqual( beers[0], "Cooper's", 'cursor.fetchall retrieved incorrect data, or data inserted' ' incorrectly') self.assertEqual( beers[1], "Victoria Bitter", 'cursor.fetchall retrieved incorrect data, or data inserted' ' incorrectly') trouble = "thi%s :may ca%(u)se?
troub:1e" self.assertEqual( res[0][1], trouble, 'cursor.fetchall retrieved incorrect data, or data inserted' f' incorrectly. Got: {res[0][1]!r}, Expected: {trouble!r}') self.assertEqual( res[1][1], trouble, 'cursor.fetchall retrieved incorrect data, or data inserted' f' incorrectly. Got: {res[1][1]!r}, Expected: {trouble!r}') def test_executemany(self): con = self._connect() try: cur = con.cursor() self.execute_ddl1(cur) table_prefix = self.table_prefix insert = f'{self.insert} into {table_prefix}booze values' largs = [("Cooper's",), ("Boag's",)] margs = [{'beer': "Cooper's"}, {'beer': "Boag's"}] if self.driver.paramstyle == 'qmark': cur.executemany(f'{insert} (?)', largs) elif self.driver.paramstyle == 'numeric': cur.executemany(f'{insert} (:1)', largs) elif self.driver.paramstyle == 'named': cur.executemany(f'{insert} (:beer)', margs) elif self.driver.paramstyle == 'format': cur.executemany(f'{insert} (%s)', largs) elif self.driver.paramstyle == 'pyformat': cur.executemany(f'{insert} (%(beer)s)', margs) else: self.fail('Unknown paramstyle') self.assertIn( cur.rowcount, (-1, 2), 'insert using cursor.executemany set cursor.rowcount to' f' incorrect value {cur.rowcount!r}') cur.execute(f'select name from {table_prefix}booze') res = cur.fetchall() self.assertEqual( len(res), 2, 'cursor.fetchall retrieved incorrect number of rows') beers = [res[0][0], res[1][0]] beers.sort() self.assertEqual(beers[0], "Boag's", 'incorrect data retrieved') self.assertEqual(beers[1], "Cooper's", 'incorrect data retrieved') finally: con.close() def test_fetchone(self): con = self._connect() try: cur = con.cursor() # cursor.fetchone should raise an Error if called before # executing a select-type query self.assertRaises(self.driver.Error, cur.fetchone) # cursor.fetchone should raise an Error if called after # executing a query that cannot return rows self.execute_ddl1(cur) self.assertRaises(self.driver.Error, cur.fetchone) cur.execute(f'select name from {self.table_prefix}booze') self.assertIsNone( cur.fetchone(), 'cursor.fetchone should return None if a query retrieves' ' no rows') self.assertIn(cur.rowcount, (-1, 0)) # cursor.fetchone should raise an Error if called after # executing a query that cannot return rows cur.execute( f"{self.insert} into {self.table_prefix}booze" " values ('Victoria Bitter')") self.assertRaises(self.driver.Error, cur.fetchone) cur.execute(f'select name from {self.table_prefix}booze') r = cur.fetchone() self.assertEqual( len(r), 1, 'cursor.fetchone should have retrieved a single row') self.assertEqual( r[0], 'Victoria Bitter', 'cursor.fetchone retrieved incorrect data') self.assertIsNone( cur.fetchone(), 'cursor.fetchone should return None if no more rows available') self.assertIn(cur.rowcount, (-1, 1)) finally: con.close() def test_next(self): """Test extension for getting the next row.""" con = self._connect() try: cur = con.cursor() if not hasattr(cur, 'next'): return # cursor.next should raise an Error if called before # executing a select-type query self.assertRaises(self.driver.Error, cur.next) # cursor.next should raise an Error if called after # executing a query that cannot return rows self.execute_ddl1(cur) self.assertRaises(self.driver.Error, cur.next) # cursor.next should return None if a query retrieves no rows cur.execute(f'select name from {self.table_prefix}booze') self.assertRaises(StopIteration, cur.next) self.assertIn(cur.rowcount, (-1, 0)) # cursor.next should raise an Error if called after # executing a query that cannot return rows cur.execute(f"{self.insert} 
into {self.table_prefix}booze" " values ('Victoria Bitter')") self.assertRaises(self.driver.Error, cur.next) cur.execute(f'select name from {self.table_prefix}booze') r = cur.next() self.assertEqual( len(r), 1, 'cursor.fetchone should have retrieved a single row') self.assertEqual( r[0], 'Victoria Bitter', 'cursor.next retrieved incorrect data') # cursor.next should raise StopIteration if no more rows available self.assertRaises(StopIteration, cur.next) self.assertIn(cur.rowcount, (-1, 1)) finally: con.close() samples = ( 'Carlton Cold', 'Carlton Draft', 'Mountain Goat', 'Redback', 'Victoria Bitter', 'XXXX' ) def _populate(self): """Return a list of SQL commands to set up the DB for fetching tests.""" populate = [ f"{self.insert} into {self.table_prefix}booze values ('{s}')" for s in self.samples] return populate def test_fetchmany(self): con = self._connect() try: cur = con.cursor() # cursor.fetchmany should raise an Error if called without # issuing a query self.assertRaises(self.driver.Error, cur.fetchmany, 4) self.execute_ddl1(cur) for sql in self._populate(): cur.execute(sql) cur.execute(f'select name from {self.table_prefix}booze') r = cur.fetchmany() self.assertEqual( len(r), 1, 'cursor.fetchmany retrieved incorrect number of rows,' ' default of arraysize is one.') cur.arraysize = 10 r = cur.fetchmany(3) # Should get 3 rows self.assertEqual( len(r), 3, 'cursor.fetchmany retrieved incorrect number of rows') r = cur.fetchmany(4) # Should get 2 more self.assertEqual( len(r), 2, 'cursor.fetchmany retrieved incorrect number of rows') r = cur.fetchmany(4) # Should be an empty sequence self.assertEqual( len(r), 0, 'cursor.fetchmany should return an empty sequence after' ' results are exhausted') self.assertIn(cur.rowcount, (-1, 6)) # Same as above, using cursor.arraysize cur.arraysize = 4 cur.execute(f'select name from {self.table_prefix}booze') r = cur.fetchmany() # Should get 4 rows self.assertEqual( len(r), 4, 'cursor.arraysize not being honoured by fetchmany') r = cur.fetchmany() # Should get 2 more self.assertEqual(len(r), 2) r = cur.fetchmany() # Should be an empty sequence self.assertEqual(len(r), 0) self.assertIn(cur.rowcount, (-1, 6)) cur.arraysize = 6 cur.execute(f'select name from {self.table_prefix}booze') rows = cur.fetchmany() # Should get all rows self.assertIn(cur.rowcount, (-1, 6)) self.assertEqual(len(rows), 6) rows = [r[0] for r in rows] rows.sort() # Make sure we get the right data back out for i in range(0, 6): self.assertEqual( rows[i], self.samples[i], 'incorrect data retrieved by cursor.fetchmany') rows = cur.fetchmany() # Should return an empty list self.assertEqual( len(rows), 0, 'cursor.fetchmany should return an empty sequence if' ' called after the whole result set has been fetched') self.assertIn(cur.rowcount, (-1, 6)) self.execute_ddl2(cur) cur.execute(f'select name from {self.table_prefix}barflys') r = cur.fetchmany() # Should get empty sequence self.assertEqual( len(r), 0, 'cursor.fetchmany should return an empty sequence if' ' query retrieved no rows') self.assertIn(cur.rowcount, (-1, 0)) finally: con.close() def test_fetchall(self): con = self._connect() try: cur = con.cursor() # cursor.fetchall should raise an Error if called # without executing a query that may return rows (such # as a select) self.assertRaises(self.driver.Error, cur.fetchall) self.execute_ddl1(cur) for sql in self._populate(): cur.execute(sql) # cursor.fetchall should raise an Error if called # after executing a statement that cannot return rows
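            # (the populate loop above ended with insert statements,
            # which produce no result set to fetch from)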
self.assertRaises(self.driver.Error, cur.fetchall) cur.execute(f'select name from {self.table_prefix}booze') rows = cur.fetchall() self.assertIn(cur.rowcount, (-1, len(self.samples))) self.assertEqual( len(rows), len(self.samples), 'cursor.fetchall did not retrieve all rows') rows = sorted(r[0] for r in rows) for i in range(0, len(self.samples)): self.assertEqual( rows[i], self.samples[i], 'cursor.fetchall retrieved incorrect rows') rows = cur.fetchall() self.assertEqual( len(rows), 0, 'cursor.fetchall should return an empty list if called' ' after the whole result set has been fetched') self.assertIn(cur.rowcount, (-1, len(self.samples))) self.execute_ddl2(cur) cur.execute(f'select name from {self.table_prefix}barflys') rows = cur.fetchall() self.assertIn(cur.rowcount, (-1, 0)) self.assertEqual( len(rows), 0, 'cursor.fetchall should return an empty list if' ' a select query returns no rows') finally: con.close() def test_mixedfetch(self): con = self._connect() try: cur = con.cursor() self.execute_ddl1(cur) for sql in self._populate(): cur.execute(sql) cur.execute(f'select name from {self.table_prefix}booze') rows1 = cur.fetchone() rows23 = cur.fetchmany(2) rows4 = cur.fetchone() rows56 = cur.fetchall() self.assertIn(cur.rowcount, (-1, 6)) self.assertEqual( len(rows23), 2, 'fetchmany returned incorrect number of rows') self.assertEqual( len(rows56), 2, 'fetchall returned incorrect number of rows') rows = [rows1[0]] rows.extend([rows23[0][0], rows23[1][0]]) rows.append(rows4[0]) rows.extend([rows56[0][0], rows56[1][0]]) rows.sort() for i in range(0, len(self.samples)): self.assertEqual( rows[i], self.samples[i], 'incorrect data retrieved or inserted') finally: con.close() def help_nextset_setup(self, cur): """Set up nextset test. Should create a procedure called deleteme that returns two result sets, first the number of rows in booze, then "name from booze". """ raise NotImplementedError('Helper not implemented') # sql = """ # create procedure deleteme as # begin # select count(*) from booze # select name from booze # end # """ # cur.execute(sql) def help_nextset_teardown(self, cur): """Clean up after nextset test. If cleaning up is needed after test_nextset. 
""" raise NotImplementedError('Helper not implemented') # cur.execute("drop procedure deleteme") def test_nextset(self): """Test the nextset functionality.""" raise NotImplementedError('Drivers need to override this test') # example test implementation only: # con = self._connect() # try: # cur = con.cursor() # if not hasattr(cur, 'nextset'): # return # try: # self.executeDDL1(cur) # for sql in self._populate(): # cur.execute(sql) # self.help_nextset_setup(cur) # cur.callproc('deleteme') # number_of_rows = cur.fetchone() # self.assertEqual(number_of_rows[0], len(self.samples)) # self.assertTrue(cur.nextset()) # names = cur.fetchall() # self.assertEqual(len(names), len(self.samples)) # self.assertIsNone( # cur.nextset(), 'No more return sets, should return None') # finally: # self.help_nextset_teardown(cur) # finally: # con.close() def test_arraysize(self): # Not much here - rest of the tests for this are in test_fetchmany con = self._connect() try: cur = con.cursor() self.assertTrue(hasattr(cur, 'arraysize'), 'cursor.arraysize must be defined') finally: con.close() def test_setinputsizes(self): con = self._connect() try: cur = con.cursor() cur.setinputsizes((25,)) self._paraminsert(cur) # Make sure cursor still works finally: con.close() def test_setoutputsize_basic(self): # Basic test is to make sure setoutputsize doesn't blow up con = self._connect() try: cur = con.cursor() cur.setoutputsize(1000) cur.setoutputsize(2000, 0) self._paraminsert(cur) # Make sure the cursor still works finally: con.close() def test_setoutputsize(self): # Real test for setoutputsize is driver dependant raise NotImplementedError('Driver needed to override this test') def test_none(self): con = self._connect() try: cur = con.cursor() self.execute_ddl2(cur) # inserting NULL to the second column, because some drivers might # need the first one to be primary key, which means it needs # to have a non-NULL value cur.execute(f"{self.insert} into {self.table_prefix}barflys" " values ('a', NULL)") cur.execute(f'select drink from {self.table_prefix}barflys') r = cur.fetchall() self.assertEqual(len(r), 1) self.assertEqual(len(r[0]), 1) self.assertIsNone(r[0][0], 'NULL value not returned as None') finally: con.close() def test_date(self): d1 = self.driver.Date(2002, 12, 25) d2 = self.driver.DateFromTicks( time.mktime((2002, 12, 25, 0, 0, 0, 0, 0, 0))) # Can we assume this? API doesn't specify, but it seems implied self.assertEqual(str(d1), str(d2)) def test_time(self): t1 = self.driver.Time(13, 45, 30) t2 = self.driver.TimeFromTicks( time.mktime((2001, 1, 1, 13, 45, 30, 0, 0, 0))) # Can we assume this? API doesn't specify, but it seems implied self.assertEqual(str(t1), str(t2)) def test_timestamp(self): t1 = self.driver.Timestamp(2002, 12, 25, 13, 45, 30) t2 = self.driver.TimestampFromTicks( time.mktime((2002, 12, 25, 13, 45, 30, 0, 0, 0)) ) # Can we assume this? 
API doesn't specify, but it seems implied self.assertEqual(str(t1), str(t2)) def test_binary_string(self): self.driver.Binary(b'Something') self.driver.Binary(b'') def test_string_type(self): self.assertTrue(hasattr(self.driver, 'STRING'), 'module.STRING must be defined') def test_binary_type(self): self.assertTrue(hasattr(self.driver, 'BINARY'), 'module.BINARY must be defined.') def test_number_type(self): self.assertTrue(hasattr(self.driver, 'NUMBER'), 'module.NUMBER must be defined.') def test_datetime_type(self): self.assertTrue(hasattr(self.driver, 'DATETIME'), 'module.DATETIME must be defined.') def test_rowid_type(self): self.assertTrue(hasattr(self.driver, 'ROWID'), 'module.ROWID must be defined.') PyGreSQL-PyGreSQL-166b135/tests/test_classic.py000077500000000000000000000253131450706350600212060ustar00rootroot00000000000000#!/usr/bin/python import unittest from contextlib import suppress from functools import partial from threading import Thread from time import sleep from pg import ( DB, DatabaseError, Error, IntegrityError, NotificationHandler, NotSupportedError, ProgrammingError, ) from .config import dbhost, dbname, dbpasswd, dbport, dbuser def open_db(): db = DB(dbname, dbhost, dbport, user=dbuser, passwd=dbpasswd) db.query("SET DATESTYLE TO 'ISO'") db.query("SET TIME ZONE 'EST5EDT'") db.query("SET DEFAULT_WITH_OIDS=FALSE") db.query("SET CLIENT_MIN_MESSAGES=WARNING") db.query("SET STANDARD_CONFORMING_STRINGS=FALSE") return db class UtilityTest(unittest.TestCase): @classmethod def setUpClass(cls): """Recreate test tables and schemas.""" db = open_db() with suppress(Exception): db.query("DROP VIEW _test_vschema") with suppress(Exception): db.query("DROP TABLE _test_schema") db.query("CREATE TABLE _test_schema" " (_test int PRIMARY KEY, _i interval, dvar int DEFAULT 999)") db.query("CREATE VIEW _test_vschema AS" " SELECT _test, 'abc'::text AS _test2 FROM _test_schema") for t in ('_test1', '_test2'): with suppress(Exception): db.query("CREATE SCHEMA " + t) with suppress(Exception): db.query(f"DROP TABLE {t}._test_schema") db.query(f"CREATE TABLE {t}._test_schema" f" ({t} int PRIMARY KEY)") db.close() def setUp(self): """Set up test tables or empty them if they already exist.""" db = open_db() db.query("TRUNCATE TABLE _test_schema") for t in ('_test1', '_test2'): db.query(f"TRUNCATE TABLE {t}._test_schema") db.close() def test_invalid_name(self): """Make sure that invalid table names are caught.""" db = open_db() self.assertRaises(NotSupportedError, db.get_attnames, 'x.y.z') def test_schema(self): """Check differentiation of same table name in different schemas.""" db = open_db() # see if they differentiate the table names properly self.assertEqual( db.get_attnames('_test_schema'), {'_test': 'int', '_i': 'date', 'dvar': 'int'} ) self.assertEqual( db.get_attnames('public._test_schema'), {'_test': 'int', '_i': 'date', 'dvar': 'int'} ) self.assertEqual( db.get_attnames('_test1._test_schema'), {'_test1': 'int'} ) self.assertEqual( db.get_attnames('_test2._test_schema'), {'_test2': 'int'} ) def test_pkey(self): db = open_db() self.assertEqual(db.pkey('_test_schema'), '_test') self.assertEqual(db.pkey('public._test_schema'), '_test') self.assertEqual(db.pkey('_test1._test_schema'), '_test1') self.assertEqual(db.pkey('_test2._test_schema'), '_test2') self.assertRaises(KeyError, db.pkey, '_test_vschema') def test_get(self): db = open_db() db.query("INSERT INTO _test_schema VALUES (1234)") db.get('_test_schema', 1234) db.get('_test_schema', 1234, keyname='_test') 
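        # _test_vschema is a view without a primary key, so get() needs an
        # explicit keyname; without one it should raise ProgrammingError: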
self.assertRaises(ProgrammingError, db.get, '_test_vschema', 1234) db.get('_test_vschema', 1234, keyname='_test') def test_params(self): db = open_db() db.query("INSERT INTO _test_schema VALUES ($1, $2, $3)", 12, None, 34) d = db.get('_test_schema', 12) self.assertEqual(d['dvar'], 34) def test_insert(self): db = open_db() d = dict(_test=1234) db.insert('_test_schema', d) self.assertEqual(d['dvar'], 999) db.insert('_test_schema', _test=1235) self.assertEqual(d['dvar'], 999) def test_context_manager(self): db = open_db() t = '_test_schema' d = dict(_test=1235) with db: db.insert(t, d) d['_test'] += 1 db.insert(t, d) try: with db: d['_test'] += 1 db.insert(t, d) db.insert(t, d) except IntegrityError: pass with db: d['_test'] += 1 db.insert(t, d) d['_test'] += 1 db.insert(t, d) self.assertTrue(db.get(t, 1235)) self.assertTrue(db.get(t, 1236)) self.assertRaises(DatabaseError, db.get, t, 1237) self.assertTrue(db.get(t, 1238)) self.assertTrue(db.get(t, 1239)) def test_sqlstate(self): db = open_db() db.query("INSERT INTO _test_schema VALUES (1234)") try: db.query("INSERT INTO _test_schema VALUES (1234)") except DatabaseError as error: self.assertIsInstance(error, IntegrityError) # the SQLSTATE error code for unique violation is 23505 # noinspection PyUnresolvedReferences self.assertEqual(error.sqlstate, '23505') def test_mixed_case(self): db = open_db() try: db.query('CREATE TABLE _test_mc ("_Test" int PRIMARY KEY)') except Error: db.query("TRUNCATE TABLE _test_mc") d = dict(_Test=1234) r = db.insert('_test_mc', d) self.assertEqual(r, d) def test_update(self): db = open_db() db.query("INSERT INTO _test_schema VALUES (1234)") r = db.get('_test_schema', 1234) r['dvar'] = 123 db.update('_test_schema', r) r = db.get('_test_schema', 1234) self.assertEqual(r['dvar'], 123) r = db.get('_test_schema', 1234) self.assertIn('dvar', r) db.update('_test_schema', _test=1234, dvar=456) r = db.get('_test_schema', 1234) self.assertEqual(r['dvar'], 456) r = db.get('_test_schema', 1234) db.update('_test_schema', r, dvar=456) r = db.get('_test_schema', 1234) self.assertEqual(r['dvar'], 456) def notify_callback(self, arg_dict): if arg_dict: arg_dict['called'] = True else: self.notify_timeout = True def test_notify(self, options=None): if not options: options = {} run_as_method = options.get('run_as_method') call_notify = options.get('call_notify') two_payloads = options.get('two_payloads') db = open_db() # Get function under test, can be standalone or DB method. fut = db.notification_handler if run_as_method else partial( NotificationHandler, db) arg_dict = dict(event=None, called=False) self.notify_timeout = False # Listen for 'event_1'. target = fut('event_1', self.notify_callback, arg_dict, 5) thread = Thread(None, target) thread.start() try: # Wait until the thread has started. for _n in range(500): if target.listening: break sleep(0.01) self.assertTrue(target.listening) self.assertTrue(thread.is_alive()) # Open another connection for sending notifications. db2 = open_db() # Generate notification from the other connection. if two_payloads: db2.begin() if call_notify: if two_payloads: target.notify(db2, payload='payload 0') target.notify(db2, payload='payload 1') else: if two_payloads: db2.query("notify event_1, 'payload 0'") db2.query("notify event_1, 'payload 1'") if two_payloads: db2.commit() # Wait until the notification has been caught. for _n in range(500): if arg_dict['called'] or self.notify_timeout: break sleep(0.01) # Check that callback has been invoked. 
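            # the handler fills arg_dict with the event name, the payload
            # (under 'extra') and the backend pid of the notifying session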
self.assertTrue(arg_dict['called']) self.assertEqual(arg_dict['event'], 'event_1') self.assertEqual(arg_dict['extra'], 'payload 1') self.assertIsInstance(arg_dict['pid'], int) self.assertFalse(self.notify_timeout) arg_dict['called'] = False self.assertTrue(thread.is_alive()) # Generate stop notification. if call_notify: target.notify(db2, stop=True, payload='payload 2') else: db2.query("notify stop_event_1, 'payload 2'") db2.close() # Wait until the notification has been caught. for _n in range(500): if arg_dict['called'] or self.notify_timeout: break sleep(0.01) # Check that callback has been invoked. self.assertTrue(arg_dict['called']) self.assertEqual(arg_dict['event'], 'stop_event_1') self.assertEqual(arg_dict['extra'], 'payload 2') self.assertIsInstance(arg_dict['pid'], int) self.assertFalse(self.notify_timeout) thread.join(5) self.assertFalse(thread.is_alive()) self.assertFalse(target.listening) target.close() except Exception: target.close() if thread.is_alive(): thread.join(5) def test_notify_other_options(self): for run_as_method in False, True: for call_notify in False, True: for two_payloads in False, True: options = dict( run_as_method=run_as_method, call_notify=call_notify, two_payloads=two_payloads) if any(options.values()): self.test_notify(options) def test_notify_timeout(self): for run_as_method in False, True: db = open_db() # Get function under test, can be standalone or DB method. fut = db.notification_handler if run_as_method else partial( NotificationHandler, db) arg_dict = dict(event=None, called=False) self.notify_timeout = False # Listen for 'event_1' with timeout of 50ms. target = fut('event_1', self.notify_callback, arg_dict, 0.05) thread = Thread(None, target) thread.start() # Sleep 250ms, long enough to time out. sleep(0.25) # Verify that we've indeed timed out. self.assertFalse(arg_dict.get('called')) self.assertTrue(self.notify_timeout) self.assertFalse(thread.is_alive()) self.assertFalse(target.listening) target.close() if __name__ == '__main__': unittest.main() PyGreSQL-PyGreSQL-166b135/tests/test_classic_attrdict.py000066400000000000000000000052341450706350600231010ustar00rootroot00000000000000#!/usr/bin/python """Test the classic PyGreSQL interface. Sub-tests for the DB wrapper object. Contributed by Christoph Zwerschke. These tests need a database to test against. 
""" import unittest import pg.attrs # the module under test class TestAttrDict(unittest.TestCase): """Test the simple ordered dictionary for attribute names.""" cls = pg.attrs.AttrDict def test_init(self): a = self.cls() self.assertIsInstance(a, dict) self.assertEqual(a, {}) items = [('id', 'int'), ('name', 'text')] a = self.cls(items) self.assertIsInstance(a, dict) self.assertEqual(a, dict(items)) iteritems = iter(items) a = self.cls(iteritems) self.assertIsInstance(a, dict) self.assertEqual(a, dict(items)) def test_iter(self): a = self.cls() self.assertEqual(list(a), []) keys = ['id', 'name', 'age'] items = [(key, None) for key in keys] a = self.cls(items) self.assertEqual(list(a), keys) def test_keys(self): a = self.cls() self.assertEqual(list(a.keys()), []) keys = ['id', 'name', 'age'] items = [(key, None) for key in keys] a = self.cls(items) self.assertEqual(list(a.keys()), keys) def test_values(self): a = self.cls() self.assertEqual(list(a.values()), []) items = [('id', 'int'), ('name', 'text')] values = [item[1] for item in items] a = self.cls(items) self.assertEqual(list(a.values()), values) def test_items(self): a = self.cls() self.assertEqual(list(a.items()), []) items = [('id', 'int'), ('name', 'text')] a = self.cls(items) self.assertEqual(list(a.items()), items) def test_get(self): a = self.cls([('id', 1)]) try: self.assertEqual(a['id'], 1) except KeyError: self.fail('AttrDict should be readable') def test_set(self): a = self.cls() try: a['id'] = 1 except TypeError: pass else: self.fail('AttrDict should be read-only') def test_del(self): a = self.cls([('id', 1)]) try: del a['id'] except TypeError: pass else: self.fail('AttrDict should be read-only') def test_write_methods(self): a = self.cls([('id', 1)]) self.assertEqual(a['id'], 1) for method in 'clear', 'update', 'pop', 'setdefault', 'popitem': method = getattr(a, method) self.assertRaises(TypeError, method, a) # type: ignore if __name__ == '__main__': unittest.main() PyGreSQL-PyGreSQL-166b135/tests/test_classic_connection.py000077500000000000000000003057421450706350600234340ustar00rootroot00000000000000#!/usr/bin/python """Test the classic PyGreSQL interface. Sub-tests for the low-level connection object. Contributed by Christoph Zwerschke. These tests need a database to test against. 
""" from __future__ import annotations import os import threading import time import unittest from collections import namedtuple from collections.abc import Iterable from contextlib import suppress from decimal import Decimal from typing import Any, Sequence import pg # the module under test from .config import dbhost, dbname, dbpasswd, dbport, dbuser windows = os.name == 'nt' # There is a known a bug in libpq under Windows which can cause # the interface to crash when calling PQhost(): do_not_ask_for_host = windows do_not_ask_for_host_reason = 'libpq issue on Windows' def connect(): """Create a basic pg connection to the test database.""" # noinspection PyArgumentList connection = pg.connect(dbname, dbhost, dbport, user=dbuser, passwd=dbpasswd) connection.query("set client_min_messages=warning") return connection def connect_nowait(): """Start a basic pg connection in a non-blocking manner.""" # noinspection PyArgumentList return pg.connect(dbname, dbhost, dbport, user=dbuser, passwd=dbpasswd, nowait=True) class TestCanConnect(unittest.TestCase): """Test whether a basic connection to PostgreSQL is possible.""" def test_can_connect(self): try: connection = connect() rc = connection.poll() except pg.Error as error: self.fail(f'Cannot connect to database {dbname}:\n{error}') self.assertEqual(rc, pg.POLLING_OK) self.assertIs(connection.is_non_blocking(), False) connection.set_non_blocking(True) self.assertIs(connection.is_non_blocking(), True) connection.set_non_blocking(False) self.assertIs(connection.is_non_blocking(), False) try: connection.close() except pg.Error: self.fail('Cannot close the database connection') def test_can_connect_no_wait(self): try: connection = connect_nowait() rc = connection.poll() self.assertIn(rc, (pg.POLLING_READING, pg.POLLING_WRITING)) while rc not in (pg.POLLING_OK, pg.POLLING_FAILED): rc = connection.poll() except pg.Error as error: self.fail(f'Cannot connect to database {dbname}:\n{error}') self.assertEqual(rc, pg.POLLING_OK) self.assertIs(connection.is_non_blocking(), False) connection.set_non_blocking(True) self.assertIs(connection.is_non_blocking(), True) connection.set_non_blocking(False) self.assertIs(connection.is_non_blocking(), False) try: connection.close() except pg.Error: self.fail('Cannot close the database connection') class TestConnectObject(unittest.TestCase): """Test existence of basic pg connection methods.""" def setUp(self): self.connection = connect() def tearDown(self): with suppress(pg.InternalError): self.connection.close() def is_method(self, attribute): """Check if given attribute on the connection is a method.""" if do_not_ask_for_host and attribute == 'host': return False return callable(getattr(self.connection, attribute)) def test_class_name(self): self.assertEqual(self.connection.__class__.__name__, 'Connection') def test_module_name(self): self.assertEqual(self.connection.__class__.__module__, 'pg') def test_str(self): r = str(self.connection) self.assertTrue(r.startswith(' 5: break r = self.connection.cancel() # cancel the running query thread.join() # wait for the thread to end t2 = time.time() self.assertIsInstance(r, int) self.assertEqual(r, 1) # return code should be 1 self.assertLessEqual(t2 - t1, 3) # time should be under 3 seconds self.assertTrue(errors) def test_method_file_no(self): r = self.connection.fileno() self.assertIsInstance(r, int) self.assertGreaterEqual(r, 0) def test_method_transaction(self): transaction = self.connection.transaction self.assertRaises(TypeError, transaction, None) 
self.assertEqual(transaction(), pg.TRANS_IDLE) self.connection.query('begin') self.assertEqual(transaction(), pg.TRANS_INTRANS) self.connection.query('rollback') self.assertEqual(transaction(), pg.TRANS_IDLE) def test_method_parameter(self): parameter = self.connection.parameter query = self.connection.query self.assertRaises(TypeError, parameter) r = parameter('this server setting does not exist') self.assertIsNone(r) s = query('show server_version').getresult()[0][0] self.assertIsNotNone(s) r = parameter('server_version') self.assertEqual(r, s) s = query('show server_encoding').getresult()[0][0] self.assertIsNotNone(s) r = parameter('server_encoding') self.assertEqual(r, s) s = query('show client_encoding').getresult()[0][0] self.assertIsNotNone(s) r = parameter('client_encoding') self.assertEqual(r, s) s = query('show server_encoding').getresult()[0][0] self.assertIsNotNone(s) r = parameter('server_encoding') self.assertEqual(r, s) class TestSimpleQueries(unittest.TestCase): """Test simple queries via a basic pg connection.""" def setUp(self): self.c = connect() def tearDown(self): self.doCleanups() self.c.close() def test_class_name(self): r = self.c.query("select 1") self.assertEqual(r.__class__.__name__, 'Query') def test_module_name(self): r = self.c.query("select 1") self.assertEqual(r.__class__.__module__, 'pg') def test_str(self): q = ("select 1 as a, 'hello' as h, 'w' as world" " union select 2, 'xyz', 'uvw'") r = self.c.query(q) self.assertEqual( str(r), 'a| h |world\n' '-+-----+-----\n' '1|hello|w \n' '2|xyz |uvw \n' '(2 rows)') def test_repr(self): r = repr(self.c.query("select 1")) self.assertTrue(r.startswith(' 0: field_name = f'"{field_name}"' r = f(field_name) self.assertIsInstance(r, tuple) self.assertEqual(len(r), 4) self.assertEqual(r, info) r = f(field_num) self.assertIsInstance(r, tuple) self.assertEqual(len(r), 4) self.assertEqual(r, info) self.assertRaises(IndexError, f, 'foobaz') self.assertRaises(IndexError, f, '"Foobar"') self.assertRaises(IndexError, f, -1) self.assertRaises(IndexError, f, 4) def test_len(self): q = "select 1 where false" self.assertEqual(len(self.c.query(q)), 0) q = ("select 1 as a, 2 as b, 3 as c, 4 as d" " union select 5 as a, 6 as b, 7 as c, 8 as d") self.assertEqual(len(self.c.query(q)), 2) q = ("select 1 union select 2 union select 3" " union select 4 union select 5 union select 6") self.assertEqual(len(self.c.query(q)), 6) def test_query(self): query = self.c.query query("drop table if exists test_table") self.addCleanup(query, "drop table test_table") q = "create table test_table (n integer)" r = query(q) self.assertIsNone(r) q = "insert into test_table values (1)" r = query(q) self.assertIsInstance(r, str) self.assertEqual(r, '1') q = "insert into test_table select 2" r = query(q) self.assertIsInstance(r, str) self.assertEqual(r, '1') q = "select n from test_table where n>1" r = query(q).getresult() self.assertEqual(len(r), 1) r = r[0] self.assertEqual(len(r), 1) r = r[0] self.assertIsInstance(r, int) self.assertEqual(r, 2) q = "insert into test_table select 3 union select 4 union select 5" r = query(q) self.assertIsInstance(r, str) self.assertEqual(r, '3') q = "update test_table set n=4 where n<5" r = query(q) self.assertIsInstance(r, str) self.assertEqual(r, '4') # noinspection SqlWithoutWhere q = "delete from test_table" r = query(q) self.assertIsInstance(r, str) self.assertEqual(r, '5') def test_query_with_oids(self): if self.c.server_version >= 120000: self.skipTest("database does not support tables with oids") query = 
self.c.query query("drop table if exists test_table") self.addCleanup(query, "drop table test_table") q = "create table test_table (n integer) with oids" r = query(q) self.assertIsNone(r) q = "insert into test_table values (1)" r = query(q) self.assertIsInstance(r, int) q = "insert into test_table select 2" r = query(q) self.assertIsInstance(r, int) oid = r q = "select oid from test_table where n=2" r = query(q).getresult() self.assertEqual(len(r), 1) r = r[0] self.assertEqual(len(r), 1) r = r[0] self.assertIsInstance(r, int) self.assertEqual(r, oid) q = "insert into test_table select 3 union select 4 union select 5" r = query(q) self.assertIsInstance(r, str) self.assertEqual(r, '3') q = "update test_table set n=4 where n<5" r = query(q) self.assertIsInstance(r, str) self.assertEqual(r, '4') # noinspection SqlWithoutWhere q = "delete from test_table" r = query(q) self.assertIsInstance(r, str) self.assertEqual(r, '5') def test_mem_size(self): # noinspection PyUnresolvedReferences if pg.get_pqlib_version() < 120000: self.skipTest("pqlib does not support memsize()") query = self.c.query q = query("select repeat('foo!', 8)") size = q.memsize() self.assertIsInstance(size, int) self.assertGreaterEqual(size, 32) self.assertLess(size, 8000) q = query("select repeat('foo!', 2000)") size = q.memsize() self.assertGreaterEqual(size, 8000) self.assertLess(size, 16000) class TestUnicodeQueries(unittest.TestCase): """Test unicode strings as queries via a basic pg connection.""" def setUp(self): self.c = connect() self.c.query('set client_encoding=utf8') def tearDown(self): self.c.close() def test_getresul_ascii(self): result = 'Hello, world!' cmd = f"select '{result}'" v = self.c.query(cmd).getresult()[0][0] self.assertIsInstance(v, str) self.assertEqual(v, result) def test_dictresul_ascii(self): result = 'Hello, world!' cmd = f"select '{result}' as greeting" v = self.c.query(cmd).dictresult()[0]['greeting'] self.assertIsInstance(v, str) self.assertEqual(v, result) def test_getresult_utf8(self): result = 'Hello, wörld & мир!' cmd = f"select '{result}'" # pass the query as unicode try: v = self.c.query(cmd).getresult()[0][0] except (pg.DataError, pg.NotSupportedError): self.skipTest("database does not support utf8") self.assertIsInstance(v, str) self.assertEqual(v, result) cmd_bytes = cmd.encode() v = self.c.query(cmd_bytes).getresult()[0][0] self.assertIsInstance(v, str) self.assertEqual(v, result) def test_dictresult_utf8(self): result = 'Hello, wörld & мир!' cmd = f"select '{result}' as greeting" try: v = self.c.query(cmd).dictresult()[0]['greeting'] except (pg.DataError, pg.NotSupportedError): self.skipTest("database does not support utf8") self.assertIsInstance(v, str) self.assertEqual(v, result) cmd_bytes = cmd.encode() v = self.c.query(cmd_bytes).dictresult()[0]['greeting'] self.assertIsInstance(v, str) self.assertEqual(v, result) def test_getresult_latin1(self): try: self.c.query('set client_encoding=latin1') except (pg.DataError, pg.NotSupportedError): self.skipTest("database does not support latin1") result = 'Hello, wörld!' cmd = f"select '{result}'" v = self.c.query(cmd).getresult()[0][0] self.assertIsInstance(v, str) self.assertEqual(v, result) cmd_bytes = cmd.encode('latin1') v = self.c.query(cmd_bytes).getresult()[0][0] self.assertIsInstance(v, str) self.assertEqual(v, result) def test_dictresult_latin1(self): try: self.c.query('set client_encoding=latin1') except (pg.DataError, pg.NotSupportedError): self.skipTest("database does not support latin1") result = 'Hello, wörld!' 
cmd = f"select '{result}' as greeting" v = self.c.query(cmd).dictresult()[0]['greeting'] self.assertIsInstance(v, str) self.assertEqual(v, result) cmd_bytes = cmd.encode('latin1') v = self.c.query(cmd_bytes).dictresult()[0]['greeting'] self.assertIsInstance(v, str) self.assertEqual(v, result) def test_getresult_cyrillic(self): try: self.c.query('set client_encoding=iso_8859_5') except (pg.DataError, pg.NotSupportedError): self.skipTest("database does not support cyrillic") result = 'Hello, мир!' cmd = f"select '{result}'" v = self.c.query(cmd).getresult()[0][0] self.assertIsInstance(v, str) self.assertEqual(v, result) cmd_bytes = cmd.encode('cyrillic') v = self.c.query(cmd_bytes).getresult()[0][0] self.assertIsInstance(v, str) self.assertEqual(v, result) def test_dictresult_cyrillic(self): try: self.c.query('set client_encoding=iso_8859_5') except (pg.DataError, pg.NotSupportedError): self.skipTest("database does not support cyrillic") result = 'Hello, мир!' cmd = f"select '{result}' as greeting" v = self.c.query(cmd).dictresult()[0]['greeting'] self.assertIsInstance(v, str) self.assertEqual(v, result) cmd_bytes = cmd.encode('cyrillic') v = self.c.query(cmd_bytes).dictresult()[0]['greeting'] self.assertIsInstance(v, str) self.assertEqual(v, result) def test_getresult_latin9(self): try: self.c.query('set client_encoding=latin9') except (pg.DataError, pg.NotSupportedError): self.skipTest("database does not support latin9") result = 'smœrebrœd with pražská šunka (pay in ¢, £, €, or ¥)' cmd = f"select '{result}'" v = self.c.query(cmd).getresult()[0][0] self.assertIsInstance(v, str) self.assertEqual(v, result) cmd_bytes = cmd.encode('latin9') v = self.c.query(cmd_bytes).getresult()[0][0] self.assertIsInstance(v, str) self.assertEqual(v, result) def test_dictresult_latin9(self): try: self.c.query('set client_encoding=latin9') except (pg.DataError, pg.NotSupportedError): self.skipTest("database does not support latin9") result = 'smœrebrœd with pražská šunka (pay in ¢, £, €, or ¥)' cmd = f"select '{result}' as menu" v = self.c.query(cmd).dictresult()[0]['menu'] self.assertIsInstance(v, str) self.assertEqual(v, result) cmd_bytes = cmd.encode('latin9') v = self.c.query(cmd_bytes).dictresult()[0]['menu'] self.assertIsInstance(v, str) self.assertEqual(v, result) class TestParamQueries(unittest.TestCase): """Test queries with parameters via a basic pg connection.""" def setUp(self): self.c = connect() self.c.query('set client_encoding=utf8') def tearDown(self): self.c.close() def test_query_with_none_param(self): self.assertRaises(TypeError, self.c.query, "select $1", None) self.assertRaises(TypeError, self.c.query, "select $1+$2", None, None) self.assertEqual( self.c.query("select $1::integer", (None,)).getresult(), [(None,)]) self.assertEqual( self.c.query("select $1::text", [None]).getresult(), [(None,)]) self.assertEqual( self.c.query("select $1::text", [[None]]).getresult(), [(None,)]) def test_query_with_bool_params(self, bool_enabled=None): query = self.c.query bool_enabled_default = None if bool_enabled is not None: bool_enabled_default = pg.get_bool() pg.set_bool(bool_enabled) try: bool_on = bool_enabled or bool_enabled is None v_false, v_true = (False, True) if bool_on else 'ft' r_false, r_true = [(v_false,)], [(v_true,)] self.assertEqual(query("select false").getresult(), r_false) self.assertEqual(query("select true").getresult(), r_true) q = "select $1::bool" self.assertEqual(query(q, (None,)).getresult(), [(None,)]) self.assertEqual(query(q, ('f',)).getresult(), r_false) 
self.assertEqual(query(q, ('t',)).getresult(), r_true) self.assertEqual(query(q, ('false',)).getresult(), r_false) self.assertEqual(query(q, ('true',)).getresult(), r_true) self.assertEqual(query(q, ('n',)).getresult(), r_false) self.assertEqual(query(q, ('y',)).getresult(), r_true) self.assertEqual(query(q, (0,)).getresult(), r_false) self.assertEqual(query(q, (1,)).getresult(), r_true) self.assertEqual(query(q, (False,)).getresult(), r_false) self.assertEqual(query(q, (True,)).getresult(), r_true) finally: if bool_enabled_default is not None: pg.set_bool(bool_enabled_default) def test_query_with_bool_params_not_default(self): self.test_query_with_bool_params(bool_enabled=not pg.get_bool()) def test_query_with_int_params(self): query = self.c.query self.assertEqual(query("select 1+1").getresult(), [(2,)]) self.assertEqual(query("select 1+$1", (1,)).getresult(), [(2,)]) self.assertEqual(query("select 1+$1", [1]).getresult(), [(2,)]) self.assertEqual(query("select $1::integer", (2,)).getresult(), [(2,)]) self.assertEqual(query("select $1::text", (2,)).getresult(), [('2',)]) self.assertEqual( query("select 1+$1::numeric", [1]).getresult(), [(Decimal('2'),)]) self.assertEqual( query("select 1, $1::integer", (2,)).getresult(), [(1, 2)]) self.assertEqual( query("select 1 union select $1::integer", (2,)).getresult(), [(1,), (2,)]) self.assertEqual( query("select $1::integer+$2", (1, 2)).getresult(), [(3,)]) self.assertEqual( query("select $1::integer+$2", [1, 2]).getresult(), [(3,)]) self.assertEqual( query("select 0+$1+$2+$3+$4+$5+$6", list(range(6))).getresult(), [(15,)]) def test_query_with_str_params(self): query = self.c.query self.assertEqual( query("select $1||', world!'", ('Hello',)).getresult(), [('Hello, world!',)]) self.assertEqual( query("select $1||', world!'", ['Hello']).getresult(), [('Hello, world!',)]) self.assertEqual( query("select $1||', '||$2||'!'", ('Hello', 'world')).getresult(), [('Hello, world!',)]) self.assertEqual( query("select $1::text", ('Hello, world!',)).getresult(), [('Hello, world!',)]) self.assertEqual( query("select $1::text,$2::text", ('Hello', 'world')).getresult(), [('Hello', 'world')]) self.assertEqual( query("select $1::text,$2::text", ['Hello', 'world']).getresult(), [('Hello', 'world')]) self.assertEqual( query("select $1::text union select $2::text", ('Hello', 'world')).getresult(), [('Hello',), ('world',)]) try: query("select 'wörld'") except (pg.DataError, pg.NotSupportedError): self.skipTest('database does not support utf8') self.assertEqual( query("select $1||', '||$2||'!'", ('Hello', 'w\xc3\xb6rld')).getresult(), [('Hello, w\xc3\xb6rld!',)]) def test_query_with_unicode_params(self): query = self.c.query try: query('set client_encoding=utf8') self.assertEqual( query("select 'wörld'").getresult()[0][0], 'wörld') except (pg.DataError, pg.NotSupportedError): self.skipTest("database does not support utf8") self.assertEqual( query("select $1||', '||$2||'!'", ('Hello', 'wörld')).getresult(), [('Hello, wörld!',)]) def test_query_with_unicode_params_latin1(self): query = self.c.query try: query('set client_encoding=latin1') self.assertEqual( query("select 'wörld'").getresult()[0][0], 'wörld') except (pg.DataError, pg.NotSupportedError): self.skipTest("database does not support latin1") r = query("select $1||', '||$2||'!'", ('Hello', 'wörld')).getresult() self.assertEqual(r, [('Hello, wörld!',)]) self.assertRaises( UnicodeError, query, "select $1||', '||$2||'!'", ('Hello', 'мир')) query('set client_encoding=iso_8859_1') r = query( "select $1||', 
'||$2||'!'", ('Hello', 'wörld')).getresult() self.assertEqual(r, [('Hello, wörld!',)]) self.assertRaises( UnicodeError, query, "select $1||', '||$2||'!'", ('Hello', 'мир')) query('set client_encoding=sql_ascii') self.assertRaises( UnicodeError, query, "select $1||', '||$2||'!'", ('Hello', 'wörld')) def test_query_with_unicode_params_cyrillic(self): query = self.c.query try: query('set client_encoding=iso_8859_5') self.assertEqual( query("select 'мир'").getresult()[0][0], 'мир') except (pg.DataError, pg.NotSupportedError): self.skipTest("database does not support cyrillic") self.assertRaises( UnicodeError, query, "select $1||', '||$2||'!'", ('Hello', 'wörld')) r = query( "select $1||', '||$2||'!'", ('Hello', 'мир')).getresult() self.assertEqual(r, [('Hello, мир!',)]) query('set client_encoding=sql_ascii') self.assertRaises( UnicodeError, query, "select $1||', '||$2||'!'", ('Hello', 'мир!')) def test_query_with_mixed_params(self): self.assertEqual( self.c.query( "select $1+2,$2||', world!'", (1, 'Hello')).getresult(), [(3, 'Hello, world!')]) self.assertEqual( self.c.query( "select $1::integer,$2::date,$3::text", (4711, None, 'Hello!')).getresult(), [(4711, None, 'Hello!')]) def test_query_with_duplicate_params(self): self.assertRaises( pg.ProgrammingError, self.c.query, "select $1+$1", (1,)) self.assertRaises( pg.ProgrammingError, self.c.query, "select $1+$1", (1, 2)) def test_query_with_zero_params(self): self.assertEqual( self.c.query("select 1+1", []).getresult(), [(2,)]) def test_query_with_garbage(self): garbage = r"'\{}+()-#[]oo324" self.assertEqual( self.c.query("select $1::text AS garbage", (garbage,)).dictresult(), [{'garbage': garbage}]) class TestPreparedQueries(unittest.TestCase): """Test prepared queries via a basic pg connection.""" def setUp(self): self.c = connect() self.c.query('set client_encoding=utf8') def tearDown(self): self.c.close() def test_empty_prepared_statement(self): self.c.prepare('', '') self.assertRaises(ValueError, self.c.query_prepared, '') def test_invalid_prepared_statement(self): self.assertRaises(pg.ProgrammingError, self.c.prepare, '', 'bad') def test_duplicate_prepared_statement(self): self.assertIsNone(self.c.prepare('q', 'select 1')) self.assertRaises(pg.ProgrammingError, self.c.prepare, 'q', 'select 2') def test_non_existent_prepared_statement(self): self.assertRaises( pg.OperationalError, self.c.query_prepared, 'does-not-exist') def test_unnamed_query_without_params(self): self.assertIsNone(self.c.prepare('', "select 'anon'")) self.assertEqual(self.c.query_prepared('').getresult(), [('anon',)]) self.assertEqual(self.c.query_prepared('').getresult(), [('anon',)]) def test_named_query_without_params(self): self.assertIsNone(self.c.prepare('hello', "select 'world'")) self.assertEqual( self.c.query_prepared('hello').getresult(), [('world',)]) def test_multiple_named_queries_without_params(self): self.assertIsNone(self.c.prepare('query17', "select 17")) self.assertIsNone(self.c.prepare('query42', "select 42")) self.assertEqual(self.c.query_prepared('query17').getresult(), [(17,)]) self.assertEqual(self.c.query_prepared('query42').getresult(), [(42,)]) def test_unnamed_query_with_params(self): self.assertIsNone(self.c.prepare('', "select $1 || ', ' || $2")) self.assertEqual( self.c.query_prepared('', ['hello', 'world']).getresult(), [('hello, world',)]) self.assertIsNone(self.c.prepare('', "select 1+ $1 + $2 + $3")) self.assertEqual( self.c.query_prepared('', [17, -5, 29]).getresult(), [(42,)]) def test_multiple_named_queries_with_params(self): 
self.assertIsNone(self.c.prepare('q1', "select $1 || '!'")) self.assertIsNone(self.c.prepare('q2', "select $1 || '-' || $2")) self.assertEqual( self.c.query_prepared('q1', ['hello']).getresult(), [('hello!',)]) self.assertEqual( self.c.query_prepared('q2', ['he', 'lo']).getresult(), [('he-lo',)]) def test_describe_non_existent_query(self): self.assertRaises( pg.OperationalError, self.c.describe_prepared, 'does-not-exist') def test_describe_unnamed_query(self): self.c.prepare('', "select 1::int, 'a'::char") r = self.c.describe_prepared('') self.assertEqual(r.listfields(), ('int4', 'bpchar')) def test_describe_named_query(self): self.c.prepare('myquery', "select 1 as first, 2 as second") r = self.c.describe_prepared('myquery') self.assertEqual(r.listfields(), ('first', 'second')) def test_describe_multiple_named_queries(self): self.c.prepare('query1', "select 1::int") self.c.prepare('query2', "select 1::int, 2::int") r = self.c.describe_prepared('query1') self.assertEqual(r.listfields(), ('int4',)) r = self.c.describe_prepared('query2') self.assertEqual(r.listfields(), ('int4', 'int4')) class TestQueryResultTypes(unittest.TestCase): """Test proper result types via a basic pg connection.""" def setUp(self): self.c = connect() self.c.query('set client_encoding=utf8') self.c.query("set datestyle='ISO,YMD'") self.c.query("set timezone='UTC'") def tearDown(self): self.c.close() def assert_proper_cast(self, value, pgtype, pytype): q = f'select $1::{pgtype}' try: r = self.c.query(q, (value,)).getresult()[0][0] except pg.ProgrammingError as e: if pgtype in ('json', 'jsonb'): self.skipTest('database does not support json') self.fail(str(e)) # noinspection PyUnboundLocalVariable self.assertIsInstance(r, pytype) if isinstance(value, str) and ( not value or ' ' in value or '{' in value): value = f'"{value}"' value = f'{{{value}}}' r = self.c.query(q + '[]', (value,)).getresult()[0][0] if pgtype.startswith(('date', 'time', 'interval')): # arrays of these are casted by the DB wrapper only self.assertEqual(r, value) else: self.assertIsInstance(r, list) self.assertEqual(len(r), 1) self.assertIsInstance(r[0], pytype) def test_int(self): self.assert_proper_cast(0, 'int', int) self.assert_proper_cast(0, 'smallint', int) self.assert_proper_cast(0, 'oid', int) self.assert_proper_cast(0, 'cid', int) self.assert_proper_cast(0, 'xid', int) def test_long(self): self.assert_proper_cast(0, 'bigint', int) def test_float(self): self.assert_proper_cast(0, 'float', float) self.assert_proper_cast(0, 'real', float) self.assert_proper_cast(0, 'double precision', float) self.assert_proper_cast('infinity', 'float', float) def test_numeric(self): decimal = pg.get_decimal() self.assert_proper_cast(decimal(0), 'numeric', decimal) self.assert_proper_cast(decimal(0), 'decimal', decimal) def test_money(self): decimal = pg.get_decimal() self.assert_proper_cast(decimal('0'), 'money', decimal) def test_bool(self): bool_type = bool if pg.get_bool() else str self.assert_proper_cast('f', 'bool', bool_type) def test_date(self): self.assert_proper_cast('1956-01-31', 'date', str) self.assert_proper_cast('10:20:30', 'interval', str) self.assert_proper_cast('08:42:15', 'time', str) self.assert_proper_cast('08:42:15+00', 'timetz', str) self.assert_proper_cast('1956-01-31 08:42:15', 'timestamp', str) self.assert_proper_cast('1956-01-31 08:42:15+00', 'timestamptz', str) def test_text(self): self.assert_proper_cast('', 'text', str) self.assert_proper_cast('', 'char', str) self.assert_proper_cast('', 'bpchar', str) self.assert_proper_cast('', 
'varchar', str) def test_bytea(self): self.assert_proper_cast('', 'bytea', bytes) def test_json(self): self.assert_proper_cast('{}', 'json', dict) class TestQueryIterator(unittest.TestCase): """Test the query operating as an iterator.""" def setUp(self): self.c = connect() def tearDown(self): self.c.close() def test_len(self): r = self.c.query("select generate_series(3,7)") self.assertEqual(len(r), 5) def test_get_item(self): r = self.c.query("select generate_series(7,9)") self.assertEqual(r[0], (7,)) self.assertEqual(r[1], (8,)) self.assertEqual(r[2], (9,)) def test_get_item_with_negative_index(self): r = self.c.query("select generate_series(7,9)") self.assertEqual(r[-1], (9,)) self.assertEqual(r[-2], (8,)) self.assertEqual(r[-3], (7,)) def test_get_item_out_of_range(self): r = self.c.query("select generate_series(7,9)") self.assertRaises(IndexError, r.__getitem__, 3) def test_iterate(self): r = self.c.query("select generate_series(3,5)") self.assertNotIsInstance(r, (list, tuple)) self.assertIsInstance(r, Iterable) self.assertEqual(list(r), [(3,), (4,), (5,)]) # noinspection PyUnresolvedReferences self.assertIsInstance(r[1], tuple) def test_iterate_twice(self): r = self.c.query("select generate_series(3,5)") for _i in range(2): self.assertEqual(list(r), [(3,), (4,), (5,)]) def test_iterate_two_columns(self): r = self.c.query("select 1,2 union select 3,4") self.assertIsInstance(r, Iterable) self.assertEqual(list(r), [(1, 2), (3, 4)]) def test_next(self): r = self.c.query("select generate_series(7,9)") self.assertEqual(next(r), (7,)) self.assertEqual(next(r), (8,)) self.assertEqual(next(r), (9,)) self.assertRaises(StopIteration, next, r) def test_contains(self): r = self.c.query("select generate_series(7,9)") self.assertIn((8,), r) self.assertNotIn((5,), r) def test_dict_iterate(self): r = self.c.query("select generate_series(3,5) as n").dictiter() self.assertNotIsInstance(r, (list, tuple)) self.assertIsInstance(r, Iterable) r = list(r) self.assertEqual(r, [dict(n=3), dict(n=4), dict(n=5)]) self.assertIsInstance(r[1], dict) def test_dict_iterate_two_columns(self): r = self.c.query( "select 1 as one, 2 as two" " union select 3 as one, 4 as two").dictiter() self.assertIsInstance(r, Iterable) r = list(r) self.assertEqual(r, [dict(one=1, two=2), dict(one=3, two=4)]) def test_dict_next(self): r = self.c.query("select generate_series(7,9) as n").dictiter() self.assertEqual(next(r), dict(n=7)) self.assertEqual(next(r), dict(n=8)) self.assertEqual(next(r), dict(n=9)) self.assertRaises(StopIteration, next, r) def test_dict_contains(self): r = self.c.query("select generate_series(7,9) as n").dictiter() self.assertIn(dict(n=8), r) self.assertNotIn(dict(n=5), r) def test_named_iterate(self): r = self.c.query("select generate_series(3,5) as number").namediter() self.assertNotIsInstance(r, (list, tuple)) self.assertIsInstance(r, Iterable) r = list(r) self.assertEqual(r, [(3,), (4,), (5,)]) self.assertIsInstance(r[1], tuple) self.assertEqual(r[1]._fields, ('number',)) self.assertEqual(r[1].number, 4) def test_named_iterate_two_columns(self): r = self.c.query( "select 1 as one, 2 as two" " union select 3 as one, 4 as two").namediter() self.assertIsInstance(r, Iterable) r = list(r) self.assertEqual(r, [(1, 2), (3, 4)]) self.assertEqual(r[0]._fields, ('one', 'two')) self.assertEqual(r[0].one, 1) self.assertEqual(r[1]._fields, ('one', 'two')) self.assertEqual(r[1].two, 4) def test_named_next(self): r = self.c.query("select generate_series(7,9) as number").namediter() self.assertEqual(next(r), (7,)) 
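# namediter() returns a plain iterator, so next() keeps yielding rows as named tuples until StopIteration is raised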
self.assertEqual(next(r), (8,)) n = next(r) self.assertEqual(n._fields, ('number',)) self.assertEqual(n.number, 9) self.assertRaises(StopIteration, next, r) def test_named_contains(self): r = self.c.query("select generate_series(7,9)").namediter() self.assertIn((8,), r) self.assertNotIn((5,), r) def test_scalar_iterate(self): r = self.c.query("select generate_series(3,5)").scalariter() self.assertNotIsInstance(r, (list, tuple)) self.assertIsInstance(r, Iterable) r = list(r) self.assertEqual(r, [3, 4, 5]) self.assertIsInstance(r[1], int) def test_scalar_iterate_two_columns(self): r = self.c.query("select 1, 2 union select 3, 4").scalariter() self.assertIsInstance(r, Iterable) r = list(r) self.assertEqual(r, [1, 3]) def test_scalar_next(self): r = self.c.query("select generate_series(7,9)").scalariter() self.assertEqual(next(r), 7) self.assertEqual(next(r), 8) self.assertEqual(next(r), 9) self.assertRaises(StopIteration, next, r) def test_scalar_contains(self): r = self.c.query("select generate_series(7,9)").scalariter() self.assertIn(8, r) self.assertNotIn(5, r) class TestQueryOneSingleScalar(unittest.TestCase): """Test the query methods for getting single rows and columns.""" def setUp(self): self.c = connect() def tearDown(self): self.c.close() def test_one_with_empty_query(self): q = self.c.query("select 0 where false") self.assertIsNone(q.one()) def test_one_with_single_row(self): q = self.c.query("select 1, 2") r = q.one() self.assertIsInstance(r, tuple) self.assertEqual(r, (1, 2)) self.assertEqual(q.one(), None) def test_one_with_two_rows(self): q = self.c.query("select 1, 2 union select 3, 4") self.assertEqual(q.one(), (1, 2)) self.assertEqual(q.one(), (3, 4)) self.assertEqual(q.one(), None) def test_one_dict_with_empty_query(self): q = self.c.query("select 0 where false") self.assertIsNone(q.onedict()) def test_one_dict_with_single_row(self): q = self.c.query("select 1 as one, 2 as two") r = q.onedict() self.assertIsInstance(r, dict) self.assertEqual(r, dict(one=1, two=2)) self.assertEqual(q.onedict(), None) def test_one_dict_with_two_rows(self): q = self.c.query( "select 1 as one, 2 as two union select 3 as one, 4 as two") self.assertEqual(q.onedict(), dict(one=1, two=2)) self.assertEqual(q.onedict(), dict(one=3, two=4)) self.assertEqual(q.onedict(), None) def test_one_named_with_empty_query(self): q = self.c.query("select 0 where false") self.assertIsNone(q.onenamed()) def test_one_named_with_single_row(self): q = self.c.query("select 1 as one, 2 as two") r = q.onenamed() self.assertEqual(r._fields, ('one', 'two')) self.assertEqual(r.one, 1) self.assertEqual(r.two, 2) self.assertEqual(r, (1, 2)) self.assertEqual(q.onenamed(), None) def test_one_named_with_two_rows(self): q = self.c.query( "select 1 as one, 2 as two union select 3 as one, 4 as two") r = q.onenamed() self.assertEqual(r._fields, ('one', 'two')) self.assertEqual(r.one, 1) self.assertEqual(r.two, 2) self.assertEqual(r, (1, 2)) r = q.onenamed() self.assertEqual(r._fields, ('one', 'two')) self.assertEqual(r.one, 3) self.assertEqual(r.two, 4) self.assertEqual(r, (3, 4)) self.assertEqual(q.onenamed(), None) def test_one_scalar_with_empty_query(self): q = self.c.query("select 0 where false") self.assertIsNone(q.onescalar()) def test_one_scalar_with_single_row(self): q = self.c.query("select 1, 2") r = q.onescalar() self.assertIsInstance(r, int) self.assertEqual(r, 1) self.assertEqual(q.onescalar(), None) def test_one_scalar_with_two_rows(self): q = self.c.query("select 1, 2 union select 3, 4") 
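# the one*() methods fetch one row per call, advancing an internal position, and return None once the result set is exhausted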
self.assertEqual(q.onescalar(), 1) self.assertEqual(q.onescalar(), 3) self.assertEqual(q.onescalar(), None) def test_single_with_empty_query(self): q = self.c.query("select 0 where false") try: q.single() except pg.InvalidResultError as e: r: Any = e else: r = None self.assertIsInstance(r, pg.NoResultError) self.assertEqual(str(r), 'No result found') def test_single_with_single_row(self): q = self.c.query("select 1, 2") r = q.single() self.assertIsInstance(r, tuple) self.assertEqual(r, (1, 2)) r = q.single() self.assertIsInstance(r, tuple) self.assertEqual(r, (1, 2)) def test_single_with_two_rows(self): q = self.c.query("select 1, 2 union select 3, 4") try: q.single() except pg.InvalidResultError as e: r: Any = e else: r = None self.assertIsInstance(r, pg.MultipleResultsError) self.assertEqual(str(r), 'Multiple results found') def test_single_dict_with_empty_query(self): q = self.c.query("select 0 where false") try: q.singledict() except pg.InvalidResultError as e: r: Any = e else: r = None self.assertIsInstance(r, pg.NoResultError) self.assertEqual(str(r), 'No result found') def test_single_dict_with_single_row(self): q = self.c.query("select 1 as one, 2 as two") r = q.singledict() self.assertIsInstance(r, dict) self.assertEqual(r, dict(one=1, two=2)) r = q.singledict() self.assertIsInstance(r, dict) self.assertEqual(r, dict(one=1, two=2)) def test_single_dict_with_two_rows(self): q = self.c.query("select 1, 2 union select 3, 4") try: q.singledict() except pg.InvalidResultError as e: r: Any = e else: r = None self.assertIsInstance(r, pg.MultipleResultsError) self.assertEqual(str(r), 'Multiple results found') def test_single_named_with_empty_query(self): q = self.c.query("select 0 where false") try: q.singlenamed() except pg.InvalidResultError as e: r: Any = e else: r = None self.assertIsInstance(r, pg.NoResultError) self.assertEqual(str(r), 'No result found') def test_single_named_with_single_row(self): q = self.c.query("select 1 as one, 2 as two") r: Any = q.singlenamed() self.assertEqual(r._fields, ('one', 'two')) self.assertEqual(r.one, 1) self.assertEqual(r.two, 2) self.assertEqual(r, (1, 2)) r = q.singlenamed() self.assertEqual(r._fields, ('one', 'two')) self.assertEqual(r.one, 1) self.assertEqual(r.two, 2) self.assertEqual(r, (1, 2)) def test_single_named_with_two_rows(self): q = self.c.query("select 1, 2 union select 3, 4") try: q.singlenamed() except pg.InvalidResultError as e: r: Any = e else: r = None self.assertIsInstance(r, pg.MultipleResultsError) self.assertEqual(str(r), 'Multiple results found') def test_single_scalar_with_empty_query(self): q = self.c.query("select 0 where false") try: q.singlescalar() except pg.InvalidResultError as e: r: Any = e else: r = None self.assertIsInstance(r, pg.NoResultError) self.assertEqual(str(r), 'No result found') def test_single_scalar_with_single_row(self): q = self.c.query("select 1, 2") r = q.singlescalar() self.assertIsInstance(r, int) self.assertEqual(r, 1) r = q.singlescalar() self.assertIsInstance(r, int) self.assertEqual(r, 1) def test_single_scalar_with_two_rows(self): q = self.c.query("select 1, 2 union select 3, 4") try: q.singlescalar() except pg.InvalidResultError as e: r: Any = e else: r = None self.assertIsInstance(r, pg.MultipleResultsError) self.assertEqual(str(r), 'Multiple results found') def test_scalar_result(self): q = self.c.query("select 1, 2 union select 3, 4") r = q.scalarresult() self.assertIsInstance(r, list) self.assertEqual(r, [1, 3]) def test_scalar_iter(self): q = self.c.query("select 1, 2 union select 3, 
4") r = q.scalariter() self.assertNotIsInstance(r, (list, tuple)) self.assertIsInstance(r, Iterable) r = list(r) self.assertEqual(r, [1, 3]) class TestInserttable(unittest.TestCase): """Test inserttable method.""" cls_set_up = False has_encoding = False @classmethod def setUpClass(cls): c = connect() c.query("drop table if exists test cascade") c.query("create table test (" "i2 smallint, i4 integer, i8 bigint," "b boolean, dt date, ti time," "d numeric, f4 real, f8 double precision, m money," "c char(1), v4 varchar(4), c4 char(4), t text)") # Check whether the test database uses SQL_ASCII - this means # that it does not consider encoding when calculating lengths. c.query("set client_encoding=utf8") try: c.query("select 'ä'") except (pg.DataError, pg.NotSupportedError): cls.has_encoding = False else: cls.has_encoding = c.query( "select length('ä') - length('a')").getresult()[0][0] == 0 c.close() cls.cls_set_up = True @classmethod def tearDownClass(cls): c = connect() c.query("drop table test cascade") c.close() def setUp(self): self.assertTrue(self.cls_set_up) self.c = connect() self.c.query("set client_encoding=utf8") self.c.query("set datestyle='ISO,YMD'") self.c.query("set lc_monetary='C'") def tearDown(self): self.c.query("truncate table test") self.c.close() data: Sequence[tuple] = [ (-1, -1, -1, True, '1492-10-12', '08:30:00', -1.2345, -1.75, -1.875, '-1.25', '-', 'r?', '!u', 'xyz'), (0, 0, 0, False, '1607-04-14', '09:00:00', 0.0, 0.0, 0.0, '0.0', ' ', '0123', '4567', '890'), (1, 1, 1, True, '1801-03-04', '03:45:00', 1.23456, 1.75, 1.875, '1.25', 'x', 'bc', 'cdef', 'g'), (2, 2, 2, False, '1903-12-17', '11:22:00', 2.345678, 2.25, 2.125, '2.75', 'y', 'q', 'ijk', 'mnop\nstux!')] @classmethod def db_len(cls, s, encoding): # noinspection PyUnresolvedReferences if cls.has_encoding: s = s if isinstance(s, str) else s.decode(encoding) else: s = s.encode(encoding) if isinstance(s, str) else s return len(s) def get_back(self, encoding='utf-8'): """Convert boolean and decimal values back.""" data = [] for row in self.c.query("select * from test order by 1").getresult(): self.assertIsInstance(row, tuple) row = list(row) if row[0] is not None: # smallint self.assertIsInstance(row[0], int) if row[1] is not None: # integer self.assertIsInstance(row[1], int) if row[2] is not None: # bigint self.assertIsInstance(row[2], int) if row[3] is not None: # boolean self.assertIsInstance(row[3], bool) if row[4] is not None: # date self.assertIsInstance(row[4], str) self.assertTrue(row[4].replace('-', '').isdigit()) if row[5] is not None: # time self.assertIsInstance(row[5], str) self.assertTrue(row[5].replace(':', '').isdigit()) if row[6] is not None: # numeric self.assertIsInstance(row[6], Decimal) row[6] = float(row[6]) if row[7] is not None: # real self.assertIsInstance(row[7], float) if row[8] is not None: # double precision self.assertIsInstance(row[8], float) row[8] = float(row[8]) if row[9] is not None: # money self.assertIsInstance(row[9], Decimal) row[9] = str(float(row[9])) if row[10] is not None: # char(1) self.assertIsInstance(row[10], str) self.assertEqual(self.db_len(row[10], encoding), 1) if row[11] is not None: # varchar(4) self.assertIsInstance(row[11], str) self.assertLessEqual(self.db_len(row[11], encoding), 4) if row[12] is not None: # char(4) self.assertIsInstance(row[12], str) self.assertEqual(self.db_len(row[12], encoding), 4) row[12] = row[12].rstrip() if row[13] is not None: # text self.assertIsInstance(row[13], str) row = tuple(row) data.append(row) return data def 
test_inserttable1_row(self): data = self.data[2:3] self.c.inserttable('test', data) self.assertEqual(self.get_back(), data) def test_inserttable4_rows(self): data = self.data self.c.inserttable('test', data) self.assertEqual(self.get_back(), data) def test_inserttable_from_tuple_of_lists(self): data = tuple(list(row) for row in self.data) self.c.inserttable('test', data) self.assertEqual(self.get_back(), self.data) def test_inserttable_with_different_row_sizes(self): data = [*self.data[:-1], (self.data[-1][:-1],)] try: self.c.inserttable('test', data) except TypeError as e: self.assertIn( 'second arg must contain sequences of the same size', str(e)) else: self.assertFalse('expected an error') def test_inserttable_from_setof_tuples(self): data = {row for row in self.data} self.c.inserttable('test', data) self.assertEqual(self.get_back(), self.data) def test_inserttable_from_dict_as_iterable(self): data = {row: None for row in self.data} self.c.inserttable('test', data) self.assertEqual(self.get_back(), self.data) def test_inserttable_from_dict_keys(self): data = {row: None for row in self.data} keys = data.keys() self.c.inserttable('test', keys) self.assertEqual(self.get_back(), self.data) def test_inserttable_from_dict_values(self): data = {i: row for i, row in enumerate(self.data)} values = data.values() self.c.inserttable('test', values) self.assertEqual(self.get_back(), self.data) def test_inserttable_from_generator_of_tuples(self): data = (row for row in self.data) self.c.inserttable('test', data) self.assertEqual(self.get_back(), self.data) def test_inserttable_from_list_of_sets(self): data = [set(row) for row in self.data] try: self.c.inserttable('test', data) except TypeError as e: self.assertIn( 'second argument must contain tuples or lists', str(e)) else: self.assertFalse('expected an error') def test_inserttable_multiple_rows(self): num_rows = 100 data = list(self.data[2:3]) * num_rows self.c.inserttable('test', data) r = self.c.query("select count(*) from test").getresult()[0][0] self.assertEqual(r, num_rows) def test_inserttable_multiple_calls(self): num_rows = 10 data = self.data[2:3] for _i in range(num_rows): self.c.inserttable('test', data) r = self.c.query("select count(*) from test").getresult()[0][0] self.assertEqual(r, num_rows) def test_inserttable_null_values(self): data = [(None,) * 14] * 100 self.c.inserttable('test', data) self.assertEqual(self.get_back(), data) def test_inserttable_no_column(self): data = [()] * 10 self.c.inserttable('test', data, []) self.assertEqual(self.get_back(), []) def test_inserttable_only_one_column(self): data: list[tuple] = [(42,)] * 50 self.c.inserttable('test', data, ['i4']) data = [tuple([42 if i == 1 else None for i in range(14)])] * 50 self.assertEqual(self.get_back(), data) def test_inserttable_only_two_columns(self): data: list[tuple] = [(bool(i % 2), i * .5) for i in range(20)] self.c.inserttable('test', data, ('b', 'f4')) # noinspection PyTypeChecker data = [(None,) * 3 + (bool(i % 2),) + (None,) * 3 + (i * .5,) + (None,) * 6 for i in range(20)] self.assertEqual(self.get_back(), data) def test_inserttable_with_dotted_table_name(self): data = self.data self.c.inserttable('public.test', data) self.assertEqual(self.get_back(), data) def test_inserttable_with_invalid_table_name(self): data = [(42,)] # check that the table name is not inserted unescaped # (this would pass otherwise since there is a column named i4) try: self.c.inserttable('test (i4)', data) except ValueError as e: self.assertIn('relation "test (i4)" does not
exist', str(e)) else: self.assertFalse('expected an error') # make sure that it works if parameters are passed properly self.c.inserttable('test', data, ['i4']) def test_inserttable_with_invalid_data_type(self): try: self.c.inserttable('test', 42) except TypeError as e: self.assertIn('expects an iterable as second argument', str(e)) else: self.assertFalse('expected an error') def test_inserttable_with_invalid_column_name(self): data = [(2, 4)] # check that the column names are not inserted unescaped # (this would pass otherwise since there are columns i2 and i4) try: self.c.inserttable('test', data, ['i2,i4']) except ValueError as e: self.assertIn( 'column "i2,i4" of relation "test" does not exist', str(e)) else: self.assertFalse('expected an error') # make sure that it works if parameters are passed properly self.c.inserttable('test', data, ['i2', 'i4']) def test_inserttable_with_invalid_column_list(self): data = self.data try: self.c.inserttable('test', data, 'invalid') except TypeError as e: self.assertIn( 'expects a tuple or a list as third argument', str(e)) else: self.assertFalse('expected an error') def test_inserttable_with_huge_list_of_column_names(self): data = self.data # try inserting data with a huge list of column names cols = ['very_long_column_name'] * 2000 # Should raise a value error because the column does not exist self.assertRaises(ValueError, self.c.inserttable, 'test', data, cols) # double the size, should catch buffer overflow and raise memory error cols *= 2 self.assertRaises(MemoryError, self.c.inserttable, 'test', data, cols) def test_inserttable_with_out_of_range_data(self): # try inserting data out of range for the column type # Should raise a value error because of smallint out of range self.assertRaises( ValueError, self.c.inserttable, 'test', [[33000]], ['i2']) def test_inserttable_max_values(self): data = [(2 ** 15 - 1, 2 ** 31 - 1, 2 ** 31 - 1, True, '2999-12-31', '11:59:59', 1e99, 1.0 + 1.0 / 32, 1.0 + 1.0 / 32, None, "1", "1234", "1234", "1234" * 100)] self.c.inserttable('test', data) self.assertEqual(self.get_back(), data) def test_inserttable_byte_values(self): try: self.c.query("select '€', 'käse', 'сыр', 'pont-l''évêque'") except pg.DataError: self.skipTest("database does not support utf8") # non-ascii chars do not fit in char(1) when there is no encoding c = '€' if self.has_encoding else '$' row_unicode = ( 0, 0, 0, False, '1970-01-01', '00:00:00', 0.0, 0.0, 0.0, '0.0', c, 'bäd', 'bäd', "käse сыр pont-l'évêque") row_bytes = tuple( s.encode() if isinstance(s, str) else s for s in row_unicode) data = [row_bytes] * 2 self.c.inserttable('test', data) data = [row_unicode] * 2 self.assertEqual(self.get_back(), data) def test_inserttable_unicode_utf8(self): try: self.c.query("select '€', 'käse', 'сыр', 'pont-l''évêque'") except pg.DataError: self.skipTest("database does not support utf8") # non-ascii chars do not fit in char(1) when there is no encoding c = '€' if self.has_encoding else '$' row_unicode = ( 0, 0, 0, False, '1970-01-01', '00:00:00', 0.0, 0.0, 0.0, '0.0', c, 'bäd', 'bäd', "käse сыр pont-l'évêque") data = [row_unicode] * 2 self.c.inserttable('test', data) self.assertEqual(self.get_back(), data) def test_inserttable_unicode_latin1(self): try: self.c.query("set client_encoding=latin1") self.c.query("select '¥'") except (pg.DataError, pg.NotSupportedError): self.skipTest("database does not support latin1") # non-ascii chars do not fit in char(1) when there is no encoding c = '€' if self.has_encoding else '$' row_unicode: tuple = ( 0, 0, 0, False,
'1970-01-01', '00:00:00', 0.0, 0.0, 0.0, '0.0', c, 'bäd', 'bäd', "for käse and pont-l'évêque pay in €") data = [row_unicode] # cannot encode € sign with latin1 encoding self.assertRaises(UnicodeEncodeError, self.c.inserttable, 'test', data) row_unicode = tuple( s.replace('€', '¥') if isinstance(s, str) else s for s in row_unicode) data = [row_unicode] * 2 self.c.inserttable('test', data) self.assertEqual(self.get_back('latin1'), data) def test_inserttable_unicode_latin9(self): try: self.c.query("set client_encoding=latin9") self.c.query("select '€'") except (pg.DataError, pg.NotSupportedError): self.skipTest("database does not support latin9") return # non-ascii chars do not fit in char(1) when there is no encoding c = '€' if self.has_encoding else '$' row_unicode = ( 0, 0, 0, False, '1970-01-01', '00:00:00', 0.0, 0.0, 0.0, '0.0', c, 'bäd', 'bäd', "for käse and pont-l'évêque pay in €") data = [row_unicode] * 2 self.c.inserttable('test', data) self.assertEqual(self.get_back('latin9'), data) def test_inserttable_no_encoding(self): self.c.query("set client_encoding=sql_ascii") # non-ascii chars do not fit in char(1) when there is no encoding c = '€' if self.has_encoding else '$' row_unicode = ( 0, 0, 0, False, '1970-01-01', '00:00:00', 0.0, 0.0, 0.0, '0.0', c, 'bäd', 'bäd', "for käse and pont-l'évêque pay in €") data = [row_unicode] # cannot encode non-ascii unicode without a specific encoding self.assertRaises(UnicodeEncodeError, self.c.inserttable, 'test', data) def test_inserttable_from_query(self): data = self.c.query( "select 2::int2 as i2, 4::int4 as i4, 8::int8 as i8, true as b," "null as dt, null as ti, null as d," "4.5::float as float4, 8.5::float8 as f8," "null as m, 'c' as c, 'v4' as v4, null as c4, 'text' as text") self.c.inserttable('test', data) self.assertEqual(self.get_back(), [ (2, 4, 8, True, None, None, None, 4.5, 8.5, None, 'c', 'v4', None, 'text')]) def test_inserttable_special_chars(self): class S: def __repr__(self): return s s = '1\'2"3\b4\f5\n6\r7\t8\b9\\0' s1 = s.encode('ascii') s2 = S() data = [(t,) for t in (s, s1, s2)] self.c.inserttable('test', data, ['t']) self.assertEqual( self.c.query('select t from test').getresult(), [(s,)] * 3) def test_insert_table_big_row_size(self): # inserting rows with a size of up to 64k bytes should work t = '*' * 50000 data = [(t,)] self.c.inserttable('test', data, ['t']) self.assertEqual( self.c.query('select t from test').getresult(), data) # double the size, should catch buffer overflow and raise memory error t *= 2 data = [(t,)] self.assertRaises(MemoryError, self.c.inserttable, 'test', data, ['t']) def test_insert_table_small_int_overflow(self): rest_row = self.data[2][1:] data = [(32000, *rest_row)] self.c.inserttable('test', data) self.assertEqual(self.get_back(), data) data = [(33000, *rest_row)] try: self.c.inserttable('test', data) except ValueError as e: self.assertIn( 'value "33000" is out of range for type smallint', str(e)) else: self.assertFalse('expected an error') class TestDirectSocketAccess(unittest.TestCase): """Test copy command with direct socket access.""" cls_set_up = False @classmethod def setUpClass(cls): c = connect() c.query("drop table if exists test cascade") c.query("create table test (i int, v varchar(16))") c.close() cls.cls_set_up = True @classmethod def tearDownClass(cls): c = connect() c.query("drop table test cascade") c.close() def setUp(self): self.assertTrue(self.cls_set_up) self.c = connect() self.c.query("set client_encoding=utf8") def tearDown(self): self.c.query("truncate table test") 
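# truncate instead of drop, so the table created in setUpClass can be reused by the next test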
self.c.close() def test_putline(self): putline = self.c.putline query = self.c.query data = list(enumerate("apple pear plum cherry banana".split())) query("copy test from stdin") try: for i, v in data: putline(f"{i}\t{v}\n") finally: self.c.endcopy() r = query("select * from test").getresult() self.assertEqual(r, data) def test_putline_bytes_and_unicode(self): putline = self.c.putline query = self.c.query try: query("select 'käse+würstel'") except (pg.DataError, pg.NotSupportedError): self.skipTest('database does not support utf8') query("copy test from stdin") try: putline("47\tkäse\n".encode()) putline("35\twürstel\n") finally: self.c.endcopy() r = query("select * from test").getresult() self.assertEqual(r, [(47, 'käse'), (35, 'würstel')]) def test_getline(self): getline = self.c.getline query = self.c.query data = list(enumerate("apple banana pear plum strawberry".split())) n = len(data) self.c.inserttable('test', data) query("copy test to stdout") try: for i in range(n + 1): v = getline() if i < n: # noinspection PyStringFormat self.assertEqual(v, '{}\t{}'.format(*data[i])) elif i == n: self.assertIsNone(v) finally: with suppress(OSError): self.c.endcopy() def test_getline_bytes_and_unicode(self): getline = self.c.getline query = self.c.query try: query("select 'käse+würstel'") except (pg.DataError, pg.NotSupportedError): self.skipTest('database does not support utf8') data = [(54, 'käse'.encode()), (73, 'würstel')] self.c.inserttable('test', data) query("copy test to stdout") try: v = getline() self.assertIsInstance(v, str) self.assertEqual(v, '54\tkäse') v = getline() self.assertIsInstance(v, str) self.assertEqual(v, '73\twürstel') self.assertIsNone(getline()) finally: with suppress(OSError): self.c.endcopy() def test_parameter_checks(self): self.assertRaises(TypeError, self.c.putline) self.assertRaises(TypeError, self.c.getline, 'invalid') self.assertRaises(TypeError, self.c.endcopy, 'invalid') class TestNotifications(unittest.TestCase): """Test notification support.""" def setUp(self): self.c = connect() def tearDown(self): self.doCleanups() self.c.close() def test_get_notify(self): getnotify = self.c.getnotify query = self.c.query self.assertIsNone(getnotify()) query('listen test_notify') try: self.assertIsNone(self.c.getnotify()) query("notify test_notify") r = getnotify() self.assertIsInstance(r, tuple) self.assertEqual(len(r), 3) self.assertIsInstance(r[0], str) self.assertIsInstance(r[1], int) self.assertIsInstance(r[2], str) self.assertEqual(r[0], 'test_notify') self.assertEqual(r[2], '') self.assertIsNone(self.c.getnotify()) query("notify test_notify, 'test_payload'") r = getnotify() self.assertIsInstance(r, tuple) self.assertEqual(len(r), 3) self.assertIsInstance(r[0], str) self.assertIsInstance(r[1], int) self.assertIsInstance(r[2], str) self.assertEqual(r[0], 'test_notify') self.assertEqual(r[2], 'test_payload') self.assertIsNone(getnotify()) finally: query('unlisten test_notify') def test_get_notice_receiver(self): self.assertIsNone(self.c.get_notice_receiver()) def test_set_notice_receiver(self): self.assertRaises(TypeError, self.c.set_notice_receiver, 42) self.assertRaises(TypeError, self.c.set_notice_receiver, 'invalid') self.assertIsNone(self.c.set_notice_receiver(lambda notice: None)) self.assertIsNone(self.c.set_notice_receiver(None)) def test_set_and_get_notice_receiver(self): r = lambda notice: None # noqa: E731 self.assertIsNone(self.c.set_notice_receiver(r)) self.assertIs(self.c.get_notice_receiver(), r) self.assertIsNone(self.c.set_notice_receiver(None))
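# passing None removes the custom receiver again, so the getter reports None afterwards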
self.assertIsNone(self.c.get_notice_receiver()) def test_notice_receiver(self): self.addCleanup(self.c.query, 'drop function bilbo_notice();') self.c.query('''create function bilbo_notice() returns void AS $$ begin raise warning 'Bilbo was here!'; end; $$ language plpgsql''') received = {} def notice_receiver(notice): for attr in dir(notice): if attr.startswith('__'): continue value = getattr(notice, attr) if isinstance(value, str): value = value.replace('WARNUNG', 'WARNING') received[attr] = value self.c.set_notice_receiver(notice_receiver) self.c.query('select bilbo_notice()') self.assertEqual(received, dict( pgcnx=self.c, message='WARNING: Bilbo was here!\n', severity='WARNING', primary='Bilbo was here!', detail=None, hint=None)) class TestConfigFunctions(unittest.TestCase): """Test the functions for changing default settings. To test the effect of most of these functions, we need a database connection. That's why they are covered in this test module. """ def setUp(self): self.c = connect() self.c.query("set client_encoding=utf8") self.c.query('set bytea_output=hex') self.c.query("set lc_monetary='C'") def tearDown(self): self.c.close() def test_get_decimal_point(self): point = pg.get_decimal_point() # error if a parameter is passed self.assertRaises(TypeError, pg.get_decimal_point, point) self.assertIsInstance(point, str) self.assertEqual(point, '.') # the default setting pg.set_decimal_point(',') try: r = pg.get_decimal_point() finally: pg.set_decimal_point(point) self.assertIsInstance(r, str) self.assertEqual(r, ',') pg.set_decimal_point("'") try: r = pg.get_decimal_point() finally: pg.set_decimal_point(point) self.assertIsInstance(r, str) self.assertEqual(r, "'") pg.set_decimal_point('') try: r = pg.get_decimal_point() finally: pg.set_decimal_point(point) self.assertIsNone(r) pg.set_decimal_point(None) try: r = pg.get_decimal_point() finally: pg.set_decimal_point(point) self.assertIsNone(r) def test_set_decimal_point(self): d = Decimal point = pg.get_decimal_point() self.assertRaises(TypeError, pg.set_decimal_point) # error if decimal point is not a string self.assertRaises(TypeError, pg.set_decimal_point, 0) # error if more than one decimal point passed self.assertRaises(TypeError, pg.set_decimal_point, '.', ',') self.assertRaises(TypeError, pg.set_decimal_point, '.,') # error if decimal point is not a punctuation character self.assertRaises(TypeError, pg.set_decimal_point, '0') query = self.c.query # check that money values are interpreted as decimal values # only if decimal_point is set, and that the result is correct # only if it is set suitable for the current lc_monetary setting select_money = "select '34.25'::money" proper_money = d('34.25') bad_money = d('3425') en_locales = 'en', 'en_US', 'en_US.utf8', 'en_US.UTF-8' en_money = '$34.25', '$ 34.25', '34.25$', '34.25 $', '34.25 Dollar' de_locales = 'de', 'de_DE', 'de_DE.utf8', 'de_DE.UTF-8' de_money = ( '34,25€', '34,25 €', '€34,25', '€ 34,25', 'EUR34,25', 'EUR 34,25', '34,25 EUR', '34,25 Euro', '34,25 DM') # first try with English localization (using the point) for lc in en_locales: try: query(f"set lc_monetary='{lc}'") except pg.DataError: pass else: break else: self.skipTest("cannot set English money locale") try: query(select_money) except (pg.DataError, pg.ProgrammingError): # this can happen if the currency signs cannot be # converted using the encoding of the test database self.skipTest("database does not support English money") pg.set_decimal_point(None) try: r = query(select_money).getresult()[0][0] finally: 
pg.set_decimal_point(point) self.assertIsInstance(r, str) self.assertIn(r, en_money) pg.set_decimal_point('') try: r = query(select_money).getresult()[0][0] finally: pg.set_decimal_point(point) self.assertIsInstance(r, str) self.assertIn(r, en_money) pg.set_decimal_point('.') try: r = query(select_money).getresult()[0][0] finally: pg.set_decimal_point(point) self.assertIsInstance(r, d) self.assertEqual(r, proper_money) pg.set_decimal_point(',') try: r = query(select_money).getresult()[0][0] finally: pg.set_decimal_point(point) self.assertIsInstance(r, d) self.assertEqual(r, bad_money) pg.set_decimal_point("'") try: r = query(select_money).getresult()[0][0] finally: pg.set_decimal_point(point) self.assertIsInstance(r, d) self.assertEqual(r, bad_money) # then try with German localization (using the comma) for lc in de_locales: try: query(f"set lc_monetary='{lc}'") except pg.DataError: pass else: break else: self.skipTest("cannot set German money locale") select_money = select_money.replace('.', ',') try: query(select_money) except (pg.DataError, pg.ProgrammingError): self.skipTest("database does not support German money") pg.set_decimal_point(None) try: r = query(select_money).getresult()[0][0] finally: pg.set_decimal_point(point) self.assertIsInstance(r, str) self.assertIn(r, de_money) pg.set_decimal_point('') try: r = query(select_money).getresult()[0][0] finally: pg.set_decimal_point(point) self.assertIsInstance(r, str) self.assertIn(r, de_money) pg.set_decimal_point(',') try: r = query(select_money).getresult()[0][0] finally: pg.set_decimal_point(point) self.assertIsInstance(r, d) self.assertEqual(r, proper_money) pg.set_decimal_point('.') try: r = query(select_money).getresult()[0][0] finally: pg.set_decimal_point(point) self.assertEqual(r, bad_money) pg.set_decimal_point("'") try: r = query(select_money).getresult()[0][0] finally: pg.set_decimal_point(point) self.assertEqual(r, bad_money) def test_get_decimal(self): decimal_class = pg.get_decimal() # error if a parameter is passed self.assertRaises(TypeError, pg.get_decimal, decimal_class) self.assertIs(decimal_class, Decimal) # the default setting pg.set_decimal(int) try: r = pg.get_decimal() finally: pg.set_decimal(decimal_class) self.assertIs(r, int) r = pg.get_decimal() self.assertIs(r, decimal_class) def test_set_decimal(self): decimal_class = pg.get_decimal() # error if no parameter is passed self.assertRaises(TypeError, pg.set_decimal) query = self.c.query try: r = query("select 3425::numeric") except pg.DatabaseError: self.skipTest('database does not support numeric') r = r.getresult()[0][0] self.assertIsInstance(r, decimal_class) self.assertEqual(r, decimal_class('3425')) r = query("select 3425::numeric") pg.set_decimal(int) try: r = r.getresult()[0][0] finally: pg.set_decimal(decimal_class) self.assertNotIsInstance(r, decimal_class) self.assertIsInstance(r, int) self.assertEqual(r, 3425) def test_get_bool(self): use_bool = pg.get_bool() # error if a parameter is passed self.assertRaises(TypeError, pg.get_bool, use_bool) self.assertIsInstance(use_bool, bool) self.assertIs(use_bool, True) # the default setting pg.set_bool(False) try: r = pg.get_bool() finally: pg.set_bool(use_bool) self.assertIsInstance(r, bool) self.assertIs(r, False) pg.set_bool(True) try: r = pg.get_bool() finally: pg.set_bool(use_bool) self.assertIsInstance(r, bool) self.assertIs(r, True) pg.set_bool(0) try: r = pg.get_bool() finally: pg.set_bool(use_bool) self.assertIsInstance(r, bool) self.assertIs(r, False) pg.set_bool(1) try: r = pg.get_bool() finally: 
pg.set_bool(use_bool) self.assertIsInstance(r, bool) self.assertIs(r, True) def test_set_bool(self): use_bool = pg.get_bool() # error if no parameter is passed self.assertRaises(TypeError, pg.set_bool) query = self.c.query try: r = query("select true::bool") except pg.ProgrammingError: self.skipTest('database does not support bool') r = r.getresult()[0][0] self.assertIsInstance(r, bool) self.assertEqual(r, True) pg.set_bool(False) try: r = query("select true::bool").getresult()[0][0] finally: pg.set_bool(use_bool) self.assertIsInstance(r, str) self.assertEqual(r, 't') pg.set_bool(True) try: r = query("select true::bool").getresult()[0][0] finally: pg.set_bool(use_bool) self.assertIsInstance(r, bool) self.assertIs(r, True) def test_get_bytea_escaped(self): bytea_escaped = pg.get_bytea_escaped() # error if a parameter is passed self.assertRaises(TypeError, pg.get_bytea_escaped, bytea_escaped) self.assertIsInstance(bytea_escaped, bool) self.assertIs(bytea_escaped, False) # the default setting pg.set_bytea_escaped(True) try: r = pg.get_bytea_escaped() finally: pg.set_bytea_escaped(bytea_escaped) self.assertIsInstance(r, bool) self.assertIs(r, True) pg.set_bytea_escaped(False) try: r = pg.get_bytea_escaped() finally: pg.set_bytea_escaped(bytea_escaped) self.assertIsInstance(r, bool) self.assertIs(r, False) pg.set_bytea_escaped(1) try: r = pg.get_bytea_escaped() finally: pg.set_bytea_escaped(bytea_escaped) self.assertIsInstance(r, bool) self.assertIs(r, True) pg.set_bytea_escaped(0) try: r = pg.get_bytea_escaped() finally: pg.set_bytea_escaped(bytea_escaped) self.assertIsInstance(r, bool) self.assertIs(r, False) def test_set_bytea_escaped(self): bytea_escaped = pg.get_bytea_escaped() # error if no parameter is passed self.assertRaises(TypeError, pg.set_bytea_escaped) query = self.c.query try: r = query("select 'data'::bytea") except pg.ProgrammingError: self.skipTest('database does not support bytea') r = r.getresult()[0][0] self.assertIsInstance(r, bytes) self.assertEqual(r, b'data') pg.set_bytea_escaped(True) try: r = query("select 'data'::bytea").getresult()[0][0] finally: pg.set_bytea_escaped(bytea_escaped) self.assertIsInstance(r, str) self.assertEqual(r, '\\x64617461') pg.set_bytea_escaped(False) try: r = query("select 'data'::bytea").getresult()[0][0] finally: pg.set_bytea_escaped(bytea_escaped) self.assertIsInstance(r, bytes) self.assertEqual(r, b'data') def test_change_row_factory_cache_size(self): cache = pg.RowCache queries = ['select 1 as a, 2 as b, 3 as c', 'select 123 as abc'] query = self.c.query for maxsize in (None, 0, 1, 2, 3, 10, 1024): cache.change_size(maxsize) for _i in range(3): for q in queries: r = query(q).namedresult()[0] if q.endswith('abc'): self.assertEqual(r, (123,)) self.assertEqual(r._fields, ('abc',)) else: self.assertEqual(r, (1, 2, 3)) self.assertEqual(r._fields, ('a', 'b', 'c')) info = cache.row_factory.cache_info() self.assertEqual(info.maxsize, maxsize) self.assertEqual(info.hits + info.misses, 6) self.assertEqual(info.hits, 0 if maxsize is not None and maxsize < 2 else 4) class TestStandaloneEscapeFunctions(unittest.TestCase): """Test pg escape functions. The libpq interface memorizes some parameters of the last opened connection that influence the result of these functions. Therefore we need to open a connection with fixed parameters prior to testing in order to ensure that the tests always run under the same conditions. That's why these tests are included in this test module.
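For example, with the fixed setup below (standard_conforming_strings is off), pg.escape_string("that's cheesy") returns "that''s cheesy", and any backslash in the input is doubled as well; with conforming strings switched on, backslashes would be left untouched.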
""" cls_set_up = False @classmethod def setUpClass(cls): db = connect() query = db.query query('set client_encoding=sql_ascii') query('set standard_conforming_strings=off') query('set bytea_output=escape') db.close() cls.cls_set_up = True def test_escape_string(self): self.assertTrue(self.cls_set_up) f = pg.escape_string b = f(b'plain') self.assertIsInstance(b, bytes) self.assertEqual(b, b'plain') s = f('plain') self.assertIsInstance(s, str) self.assertEqual(s, 'plain') b = f("das is' käse".encode()) self.assertIsInstance(b, bytes) self.assertEqual(b, "das is'' käse".encode()) s = f("that's cheesy") self.assertIsInstance(s, str) self.assertEqual(s, "that''s cheesy") s = f(r"It's bad to have a \ inside.") self.assertEqual(s, r"It''s bad to have a \\ inside.") def test_escape_bytea(self): self.assertTrue(self.cls_set_up) f = pg.escape_bytea b = f(b'plain') self.assertIsInstance(b, bytes) self.assertEqual(b, b'plain') s = f('plain') self.assertIsInstance(s, str) self.assertEqual(s, 'plain') b = f("das is' käse".encode()) self.assertIsInstance(b, bytes) self.assertEqual(b, b"das is'' k\\\\303\\\\244se") s = f("that's cheesy") self.assertIsInstance(s, str) self.assertEqual(s, "that''s cheesy") b = f(b'O\x00ps\xff!') self.assertEqual(b, b'O\\\\000ps\\\\377!') if __name__ == '__main__': unittest.main() PyGreSQL-PyGreSQL-166b135/tests/test_classic_dbwrapper.py000077500000000000000000006134331450706350600232620ustar00rootroot00000000000000#!/usr/bin/python """Test the classic PyGreSQL interface. Sub-tests for the DB wrapper object. Contributed by Christoph Zwerschke. These tests need a database to test against. """ from __future__ import annotations import gc import json import os import sys import tempfile import unittest from contextlib import suppress from datetime import date, datetime, time, timedelta from decimal import Decimal from io import StringIO from operator import itemgetter from time import strftime from typing import Any, Callable, ClassVar from uuid import UUID import pg # the module under test from .config import dbhost, dbname, dbpasswd, dbport, dbuser debug = False # let DB wrapper print debugging output windows = os.name == 'nt' # There is a known a bug in libpq under Windows which can cause # the interface to crash when calling PQhost(): do_not_ask_for_host = windows do_not_ask_for_host_reason = 'libpq issue on Windows' def DB(): # noqa: N802 """Create a DB wrapper object connecting to the test database.""" db = pg.DB(dbname, dbhost, dbport, user=dbuser, passwd=dbpasswd) if debug: db.debug = debug db.query("set client_min_messages=warning") return db class TestDBClassInit(unittest.TestCase): """Test proper handling of errors when creating DB instances.""" def test_bad_params(self): self.assertRaises(TypeError, pg.DB, invalid=True) # noinspection PyUnboundLocalVariable def test_delete_db(self): db = DB() del db.db self.assertRaises(pg.InternalError, db.close) del db def test_async_query_before_deletion(self): db = DB() query = db.send_query('select 1') self.assertEqual(query.getresult(), [(1,)]) self.assertIsNone(query.getresult()) self.assertIsNone(query.getresult()) del db gc.collect() def test_async_query_after_deletion(self): db = DB() query = db.send_query('select 1') del db gc.collect() self.assertIsNone(query.getresult()) self.assertIsNone(query.getresult()) class TestDBClassBasic(unittest.TestCase): """Test existence of the DB class wrapped pg connection methods.""" def setUp(self): self.db = DB() def tearDown(self): with suppress(pg.InternalError): self.db.close() def 
test_all_db_attributes(self): attributes = [ 'abort', 'adapter', 'backend_pid', 'begin', 'cancel', 'clear', 'close', 'commit', 'date_format', 'db', 'dbname', 'dbtypes', 'debug', 'decode_json', 'delete', 'delete_prepared', 'describe_prepared', 'encode_json', 'end', 'endcopy', 'error', 'escape_bytea', 'escape_identifier', 'escape_literal', 'escape_string', 'fileno', 'get', 'get_as_dict', 'get_as_list', 'get_attnames', 'get_cast_hook', 'get_databases', 'get_generated', 'get_notice_receiver', 'get_parameter', 'get_relations', 'get_tables', 'getline', 'getlo', 'getnotify', 'has_table_privilege', 'host', 'insert', 'inserttable', 'is_non_blocking', 'locreate', 'loimport', 'notification_handler', 'options', 'parameter', 'pkey', 'pkeys', 'poll', 'port', 'prepare', 'protocol_version', 'putline', 'query', 'query_formatted', 'query_prepared', 'release', 'reopen', 'reset', 'rollback', 'savepoint', 'send_query', 'server_version', 'set_cast_hook', 'set_non_blocking', 'set_notice_receiver', 'set_parameter', 'socket', 'source', 'ssl_attributes', 'ssl_in_use', 'start', 'status', 'transaction', 'truncate', 'unescape_bytea', 'update', 'upsert', 'use_regtypes', 'user', ] db_attributes = [a for a in self.db.__dir__() if not a.startswith('_')] self.assertEqual(attributes, db_attributes) def test_attribute_db(self): self.assertEqual(self.db.db.db, dbname) def test_attribute_dbname(self): self.assertEqual(self.db.dbname, dbname) def test_attribute_error(self): error = self.db.error self.assertTrue(not error or 'krb5_' in error) self.assertEqual(self.db.error, self.db.db.error) @unittest.skipIf(do_not_ask_for_host, do_not_ask_for_host_reason) def test_attribute_host(self): host = dbhost if dbhost and not dbhost.startswith('/') else 'localhost' self.assertIsInstance(self.db.host, str) self.assertEqual(self.db.host, host) self.assertEqual(self.db.db.host, host) def test_attribute_options(self): no_options = '' options = self.db.options self.assertEqual(options, no_options) self.assertEqual(options, self.db.db.options) def test_attribute_port(self): def_port = 5432 port = self.db.port self.assertIsInstance(port, int) self.assertEqual(port, dbport or def_port) self.assertEqual(port, self.db.db.port) def test_attribute_protocol_version(self): protocol_version = self.db.protocol_version self.assertIsInstance(protocol_version, int) self.assertTrue(2 <= protocol_version < 4) self.assertEqual(protocol_version, self.db.db.protocol_version) def test_attribute_server_version(self): server_version = self.db.server_version self.assertIsInstance(server_version, int) self.assertGreaterEqual(server_version, 100000) self.assertLess(server_version, 170000) self.assertEqual(server_version, self.db.db.server_version) def test_attribute_socket(self): socket = self.db.socket self.assertIsInstance(socket, int) self.assertGreaterEqual(socket, 0) def test_attribute_backend_pid(self): backend_pid = self.db.backend_pid self.assertIsInstance(backend_pid, int) self.assertGreaterEqual(backend_pid, 1) def test_attribute_ssl_in_use(self): ssl_in_use = self.db.ssl_in_use self.assertIsInstance(ssl_in_use, bool) self.assertFalse(ssl_in_use) def test_attribute_ssl_attributes(self): ssl_attributes = self.db.ssl_attributes self.assertIsInstance(ssl_attributes, dict) if ssl_attributes: self.assertEqual(ssl_attributes, { 'cipher': None, 'compression': None, 'key_bits': None, 'library': None, 'protocol': None}) def test_attribute_status(self): status_ok = 1 status = self.db.status self.assertIsInstance(status, int) self.assertEqual(status, status_ok) 
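# the DB wrapper simply mirrors the attribute of the underlying pg connection object, so both values must agree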
self.assertEqual(status, self.db.db.status) def test_attribute_user(self): no_user = 'Deprecated facility' user = self.db.user self.assertTrue(user) self.assertIsInstance(user, str) self.assertNotEqual(user, no_user) self.assertEqual(user, self.db.db.user) def test_method_escape_literal(self): self.assertEqual(self.db.escape_literal(''), "''") def test_method_escape_identifier(self): self.assertEqual(self.db.escape_identifier(''), '""') def test_method_escape_string(self): self.assertEqual(self.db.escape_string(''), '') def test_method_escape_bytea(self): self.assertEqual(self.db.escape_bytea('').replace( '\\x', '').replace('\\', ''), '') def test_method_unescape_bytea(self): self.assertEqual(self.db.unescape_bytea(''), b'') def test_method_decode_json(self): self.assertEqual(self.db.decode_json('{}'), {}) def test_method_encode_json(self): self.assertEqual(self.db.encode_json({}), '{}') def test_method_query(self): query = self.db.query query("select 1+1") query("select 1+$1+$2", 2, 3) query("select 1+$1+$2", (2, 3)) query("select 1+$1+$2", [2, 3]) query("select 1+$1", 1) def test_method_query_empty(self): self.assertRaises(ValueError, self.db.query, '') def test_method_query_data_error(self): try: self.db.query("select 1/0") except pg.DataError as error: # noinspection PyUnresolvedReferences self.assertEqual(error.sqlstate, '22012') def test_method_endcopy(self): with suppress(OSError): self.db.endcopy() def test_method_close(self): self.db.close() try: self.db.reset() except pg.Error: pass else: self.fail('Reset should give an error for a closed connection') self.assertIsNone(self.db.db) self.assertRaises(pg.InternalError, self.db.close) self.assertRaises(pg.InternalError, self.db.query, 'select 1') self.assertRaises(pg.InternalError, getattr, self.db, 'status') self.assertRaises(pg.InternalError, getattr, self.db, 'error') self.assertRaises(pg.InternalError, getattr, self.db, 'absent') def test_method_reset(self): con = self.db.db self.db.reset() self.assertIs(self.db.db, con) self.db.query("select 1+1") self.db.close() self.assertRaises(pg.InternalError, self.db.reset) def test_method_reopen(self): con = self.db.db self.db.reopen() self.assertIsNot(self.db.db, con) con = self.db.db self.db.query("select 1+1") self.db.close() self.db.reopen() self.assertIsNot(self.db.db, con) self.db.query("select 1+1") self.db.close() def test_existing_connection(self): db = pg.DB(self.db.db) self.assertIsNotNone(db.db) self.assertEqual(self.db.db, db.db) db.close() self.assertIsNone(db.db) self.assertIsNotNone(self.db.db) db.reopen() self.assertIsNotNone(db.db) self.assertEqual(self.db.db, db.db) db.close() self.assertIsNone(db.db) db = pg.DB(self.db) self.assertEqual(self.db.db, db.db) assert self.db.db is not None db = pg.DB(db=self.db.db) self.assertEqual(self.db.db, db.db) def test_existing_db_api2_connection(self): class FakeDbApi2Connection: def __init__(self, cnx): self._cnx = cnx def close(self): self._cnx.close() db2 = FakeDbApi2Connection(self.db.db) db = pg.DB(db2) # type: ignore self.assertEqual(self.db.db, db.db) db.close() self.assertIsNone(db.db) db.reopen() self.assertIsNotNone(db.db) self.assertEqual(self.db.db, db.db) db.close() self.assertIsNone(db.db) db2.close() class TestDBClass(unittest.TestCase): """Test the methods of the DB class wrapped pg connection.""" maxDiff = 80 * 20 cls_set_up = False regtypes = None supports_oids = False @classmethod def setUpClass(cls): db = DB() cls.supports_oids = db.server_version < 120000 db.query("drop table if exists test cascade") 
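# set up a fixture table covering numeric, monetary and character column types, plus a view on top of it, that is shared by all tests in this class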
db.query("create table test (" "i2 smallint, i4 integer, i8 bigint," " d numeric, f4 real, f8 double precision, m money," " v4 varchar(4), c4 char(4), t text)") db.query("create or replace view test_view as" " select i4, v4 from test") db.close() cls.cls_set_up = True @classmethod def tearDownClass(cls): db = DB() db.query("drop table test cascade") db.close() def setUp(self): self.assertTrue(self.cls_set_up) self.db = DB() if self.regtypes is None: self.regtypes = self.db.use_regtypes() else: self.db.use_regtypes(self.regtypes) query = self.db.query query('set client_encoding=utf8') query("set lc_monetary='C'") query("set datestyle='ISO,YMD'") query('set standard_conforming_strings=on') query('set bytea_output=hex') def tearDown(self): self.doCleanups() self.db.close() def create_table(self, table, definition, temporary=True, oids=None, values=None): query = self.db.query if '"' not in table or '.' in table: table = f'"{table}"' if not temporary: q = f'drop table if exists {table} cascade' query(q) self.addCleanup(query, q) temporary = 'temporary table' if temporary else 'table' as_query = definition.startswith(('as ', 'AS ')) if not as_query and not definition.startswith('('): definition = f'({definition})' with_oids = 'with oids' if oids else ( 'without oids' if self.supports_oids else '') cmd_parts = ['create', temporary, table] if as_query: cmd_parts.extend([with_oids, definition]) else: cmd_parts.extend([definition, with_oids]) cmd = ' '.join(cmd_parts) query(cmd) if values: for params in values: if not isinstance(params, (list, tuple)): params = [params] values = ', '.join(f'${n + 1}' for n in range(len(params))) cmd = f"insert into {table} values ({values})" query(cmd, params) def test_class_name(self): self.assertEqual(self.db.__class__.__name__, 'DB') def test_module_name(self): self.assertEqual(self.db.__module__, 'pg.db') self.assertEqual(self.db.__class__.__module__, 'pg.db') def test_escape_literal(self): f = self.db.escape_literal r: Any = f(b"plain") self.assertIsInstance(r, bytes) self.assertEqual(r, b"'plain'") r = f("plain") self.assertIsInstance(r, str) self.assertEqual(r, "'plain'") r = f("that's käse".encode()) self.assertIsInstance(r, bytes) self.assertEqual(r, "'that''s käse'".encode()) r = f("that's käse") self.assertIsInstance(r, str) self.assertEqual(r, "'that''s käse'") self.assertEqual(f(r"It's fine to have a \ inside."), r" E'It''s fine to have a \\ inside.'") self.assertEqual(f('No "quotes" must be escaped.'), "'No \"quotes\" must be escaped.'") def test_escape_identifier(self): f = self.db.escape_identifier r = f(b"plain") self.assertIsInstance(r, bytes) self.assertEqual(r, b'"plain"') r = f("plain") self.assertIsInstance(r, str) self.assertEqual(r, '"plain"') r = f("that's käse".encode()) self.assertIsInstance(r, bytes) self.assertEqual(r, '"that\'s käse"'.encode()) r = f("that's käse") self.assertIsInstance(r, str) self.assertEqual(r, '"that\'s käse"') self.assertEqual(f(r"It's fine to have a \ inside."), '"It\'s fine to have a \\ inside."') self.assertEqual(f('All "quotes" must be escaped.'), '"All ""quotes"" must be escaped."') def test_escape_string(self): f = self.db.escape_string r = f(b"plain") self.assertIsInstance(r, bytes) self.assertEqual(r, b"plain") r = f("plain") self.assertIsInstance(r, str) self.assertEqual(r, "plain") r = f("that's käse".encode()) self.assertIsInstance(r, bytes) self.assertEqual(r, "that''s käse".encode()) r = f("that's käse") self.assertIsInstance(r, str) self.assertEqual(r, "that''s käse") self.assertEqual(f(r"It's fine 
to have a \ inside."), r"It''s fine to have a \ inside.") def test_escape_bytea(self): f = self.db.escape_bytea # note that escape_bytea always returns hex output since Pg 9.0, # regardless of the bytea_output setting r = f(b'plain') self.assertIsInstance(r, bytes) self.assertEqual(r, b'\\x706c61696e') r = f('plain') self.assertIsInstance(r, str) self.assertEqual(r, '\\x706c61696e') r = f("das is' käse".encode()) self.assertIsInstance(r, bytes) self.assertEqual(r, b'\\x64617320697327206bc3a47365') r = f("das is' käse") self.assertIsInstance(r, str) self.assertEqual(r, '\\x64617320697327206bc3a47365') self.assertEqual(f(b'O\x00ps\xff!'), b'\\x4f007073ff21') def test_unescape_bytea(self): f = self.db.unescape_bytea r = f(b'plain') self.assertIsInstance(r, bytes) self.assertEqual(r, b'plain') r = f('plain') self.assertIsInstance(r, bytes) self.assertEqual(r, b'plain') r = f(b"das is' k\\303\\244se") self.assertIsInstance(r, bytes) self.assertEqual(r, "das is' käse".encode()) r = f("das is' k\\303\\244se") self.assertIsInstance(r, bytes) self.assertEqual(r, "das is' käse".encode()) self.assertEqual(f(r'O\\000ps\\377!'), b'O\\000ps\\377!') self.assertEqual(f(r'\\x706c61696e'), b'\\x706c61696e') self.assertEqual(f(r'\\x746861742773206be47365'), b'\\x746861742773206be47365') self.assertEqual(f(r'\\x4f007073ff21'), b'\\x4f007073ff21') def test_decode_json(self): f = self.db.decode_json self.assertIsNone(f('null')) data = { "id": 1, "name": "Foo", "price": 1234.5, "new": True, "note": None, "tags": ["Bar", "Eek"], "stock": {"warehouse": 300, "retail": 20}} text = json.dumps(data) r = f(text) self.assertIsInstance(r, dict) self.assertEqual(r, data) self.assertIsInstance(r['id'], int) self.assertIsInstance(r['name'], str) self.assertIsInstance(r['price'], float) self.assertIsInstance(r['new'], bool) self.assertIsInstance(r['tags'], list) self.assertIsInstance(r['stock'], dict) def test_encode_json(self): f = self.db.encode_json self.assertEqual(f(None), 'null') data = { "id": 1, "name": "Foo", "price": 1234.5, "new": True, "note": None, "tags": ["Bar", "Eek"], "stock": {"warehouse": 300, "retail": 20}} text = json.dumps(data) r = f(data) self.assertIsInstance(r, str) self.assertEqual(r, text) def test_get_parameter(self): f = self.db.get_parameter self.assertRaises(TypeError, f) self.assertRaises(TypeError, f, None) self.assertRaises(TypeError, f, 42) self.assertRaises(TypeError, f, '') self.assertRaises(TypeError, f, []) self.assertRaises(TypeError, f, ['']) self.assertRaises(pg.ProgrammingError, f, 'this_does_not_exist') r = f('standard_conforming_strings') self.assertEqual(r, 'on') r = f('lc_monetary') self.assertEqual(r, 'C') r = f('datestyle') self.assertEqual(r, 'ISO, YMD') r = f('bytea_output') self.assertEqual(r, 'hex') r = f(['bytea_output', 'lc_monetary']) self.assertIsInstance(r, list) self.assertEqual(r, ['hex', 'C']) r = f(('standard_conforming_strings', 'datestyle', 'bytea_output')) self.assertEqual(r, ['on', 'ISO, YMD', 'hex']) r = f({'bytea_output', 'lc_monetary'}) self.assertIsInstance(r, dict) self.assertEqual(r, {'bytea_output': 'hex', 'lc_monetary': 'C'}) r = f({'Bytea_Output', ' LC_Monetary '}) self.assertIsInstance(r, dict) self.assertEqual(r, {'Bytea_Output': 'hex', ' LC_Monetary ': 'C'}) s = dict.fromkeys(('bytea_output', 'lc_monetary')) r = f(s) self.assertIs(r, s) self.assertEqual(r, {'bytea_output': 'hex', 'lc_monetary': 'C'}) s = dict.fromkeys(('Bytea_Output', ' LC_Monetary ')) r = f(s) self.assertIs(r, s) self.assertEqual(r, {'Bytea_Output': 'hex', ' LC_Monetary ': 'C'})
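# --- Editorial sketch (added comment, not part of the original test suite) ---
# DB.get_parameter, exercised above, adapts its return type to the argument:
# a str yields a single setting, a list or tuple yields a list of values, and
# a set or dict yields a dict keyed by the requested names (a dict is filled
# in place and returned as the same object). Example, assuming an open pg.DB
# instance named db with the session settings configured in setUp:
#
#     db.get_parameter('bytea_output')                    # 'hex'
#     db.get_parameter(['bytea_output', 'lc_monetary'])   # ['hex', 'C']
#     db.get_parameter({'bytea_output', 'lc_monetary'})
#     # -> {'bytea_output': 'hex', 'lc_monetary': 'C'}
# -----------------------------------------------------------------------------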
def test_get_parameter_server_version(self): r = self.db.get_parameter('server_version_num') self.assertIsInstance(r, str) s = self.db.server_version self.assertIsInstance(s, int) self.assertEqual(r, str(s)) def test_get_parameter_all(self): f = self.db.get_parameter r = f('all') self.assertIsInstance(r, dict) self.assertEqual(r['standard_conforming_strings'], 'on') self.assertEqual(r['lc_monetary'], 'C') self.assertEqual(r['DateStyle'], 'ISO, YMD') self.assertEqual(r['bytea_output'], 'hex') def test_set_parameter(self): f = self.db.set_parameter g = self.db.get_parameter self.assertRaises(TypeError, f) self.assertRaises(TypeError, f, None) self.assertRaises(TypeError, f, 42) self.assertRaises(TypeError, f, '') self.assertRaises(TypeError, f, []) self.assertRaises(TypeError, f, ['']) self.assertRaises(ValueError, f, 'all', 'invalid') self.assertRaises(ValueError, f, { 'invalid1': 'value1', 'invalid2': 'value2'}, 'value') self.assertRaises(pg.ProgrammingError, f, 'this_does_not_exist') f('standard_conforming_strings', 'off') self.assertEqual(g('standard_conforming_strings'), 'off') f('datestyle', 'ISO, DMY') self.assertEqual(g('datestyle'), 'ISO, DMY') f(['standard_conforming_strings', 'datestyle'], ['on', 'ISO, DMY']) self.assertEqual(g('standard_conforming_strings'), 'on') self.assertEqual(g('datestyle'), 'ISO, DMY') f(['escape_string_warning', 'standard_conforming_strings'], 'off') self.assertEqual(g('escape_string_warning'), 'off') self.assertEqual(g('standard_conforming_strings'), 'off') f(('standard_conforming_strings', 'datestyle'), ('on', 'ISO, YMD')) self.assertEqual(g('standard_conforming_strings'), 'on') self.assertEqual(g('datestyle'), 'ISO, YMD') f(('escape_string_warning', 'standard_conforming_strings'), 'off') self.assertEqual(g('escape_string_warning'), 'off') self.assertEqual(g('standard_conforming_strings'), 'off') f({'escape_string_warning', 'standard_conforming_strings'}, 'on') self.assertEqual(g('escape_string_warning'), 'on') self.assertEqual(g('standard_conforming_strings'), 'on') self.assertRaises( ValueError, f, {'escape_string_warning', 'standard_conforming_strings'}, ['off', 'on']) f({'escape_string_warning', 'standard_conforming_strings'}, ['off', 'off']) self.assertEqual(g('escape_string_warning'), 'off') self.assertEqual(g('standard_conforming_strings'), 'off') f({'standard_conforming_strings': 'on', 'datestyle': 'ISO, YMD'}) self.assertEqual(g('standard_conforming_strings'), 'on') self.assertEqual(g('datestyle'), 'ISO, YMD') def test_reset_parameter(self): db = DB() f = db.set_parameter g = db.get_parameter r = g('escape_string_warning') self.assertIn(r, ('on', 'off')) esw, not_esw = r, 'off' if r == 'on' else 'on' r = g('standard_conforming_strings') self.assertIn(r, ('on', 'off')) scs, not_scs = r, 'off' if r == 'on' else 'on' f('escape_string_warning', not_esw) f('standard_conforming_strings', not_scs) self.assertEqual(g('escape_string_warning'), not_esw) self.assertEqual(g('standard_conforming_strings'), not_scs) f('escape_string_warning') f('standard_conforming_strings', None) self.assertEqual(g('escape_string_warning'), esw) self.assertEqual(g('standard_conforming_strings'), scs) f('escape_string_warning', not_esw) f('standard_conforming_strings', not_scs) self.assertEqual(g('escape_string_warning'), not_esw) self.assertEqual(g('standard_conforming_strings'), not_scs) f(['escape_string_warning', 'standard_conforming_strings'], None) self.assertEqual(g('escape_string_warning'), esw) self.assertEqual(g('standard_conforming_strings'), scs) 
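# --- Editorial sketch (added comment, not part of the original test suite) ---
# The reset behaviour verified by this test: DB.set_parameter restores a
# setting to its default when the value is omitted or None, and accepts a
# list, tuple or set of names to reset several settings at once;
# set_parameter('all'), tested below, resets every setting. For example:
#
#     db.set_parameter('datestyle', 'ISO, DMY')   # change a setting
#     db.set_parameter('datestyle')               # reset it to the default
#     db.set_parameter('all')                     # reset all settings
# -----------------------------------------------------------------------------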
f('escape_string_warning', not_esw) f('standard_conforming_strings', not_scs) self.assertEqual(g('escape_string_warning'), not_esw) self.assertEqual(g('standard_conforming_strings'), not_scs) f(('escape_string_warning', 'standard_conforming_strings')) self.assertEqual(g('escape_string_warning'), esw) self.assertEqual(g('standard_conforming_strings'), scs) f('escape_string_warning', not_esw) f('standard_conforming_strings', not_scs) self.assertEqual(g('escape_string_warning'), not_esw) self.assertEqual(g('standard_conforming_strings'), not_scs) f({'escape_string_warning', 'standard_conforming_strings'}) self.assertEqual(g('escape_string_warning'), esw) self.assertEqual(g('standard_conforming_strings'), scs) db.close() def test_reset_parameter_all(self): db = DB() f = db.set_parameter self.assertRaises(ValueError, f, 'all', 0) self.assertRaises(ValueError, f, 'all', 'off') g = db.get_parameter r = g('escape_string_warning') self.assertIn(r, ('on', 'off')) dwi, not_dwi = r, 'off' if r == 'on' else 'on' r = g('standard_conforming_strings') self.assertIn(r, ('on', 'off')) scs, not_scs = r, 'off' if r == 'on' else 'on' f('escape_string_warning', not_dwi) f('standard_conforming_strings', not_scs) self.assertEqual(g('escape_string_warning'), not_dwi) self.assertEqual(g('standard_conforming_strings'), not_scs) f('all') self.assertEqual(g('escape_string_warning'), dwi) self.assertEqual(g('standard_conforming_strings'), scs) db.close() def test_set_parameter_local(self): f = self.db.set_parameter g = self.db.get_parameter self.assertEqual(g('standard_conforming_strings'), 'on') self.db.begin() f('standard_conforming_strings', 'off', local=True) self.assertEqual(g('standard_conforming_strings'), 'off') self.db.end() self.assertEqual(g('standard_conforming_strings'), 'on') def test_set_parameter_session(self): f = self.db.set_parameter g = self.db.get_parameter self.assertEqual(g('standard_conforming_strings'), 'on') self.db.begin() f('standard_conforming_strings', 'off', local=False) self.assertEqual(g('standard_conforming_strings'), 'off') self.db.end() self.assertEqual(g('standard_conforming_strings'), 'off') def test_reset(self): db = DB() default_datestyle = db.get_parameter('datestyle') changed_datestyle = 'ISO, DMY' if changed_datestyle == default_datestyle: changed_datestyle = 'ISO, YMD' self.db.set_parameter('datestyle', changed_datestyle) r = self.db.get_parameter('datestyle') self.assertEqual(r, changed_datestyle) con = self.db.db q = con.query("show datestyle") self.db.reset() r = q.getresult()[0][0] self.assertEqual(r, changed_datestyle) q = con.query("show datestyle") r = q.getresult()[0][0] self.assertEqual(r, default_datestyle) r = self.db.get_parameter('datestyle') self.assertEqual(r, default_datestyle) db.close() def test_reopen(self): db = DB() default_datestyle = db.get_parameter('datestyle') changed_datestyle = 'ISO, DMY' if changed_datestyle == default_datestyle: changed_datestyle = 'ISO, YMD' self.db.set_parameter('datestyle', changed_datestyle) r = self.db.get_parameter('datestyle') self.assertEqual(r, changed_datestyle) con = self.db.db q = con.query("show datestyle") self.db.reopen() r = q.getresult()[0][0] self.assertEqual(r, changed_datestyle) self.assertRaises(TypeError, getattr, con, 'query') r = self.db.get_parameter('datestyle') self.assertEqual(r, default_datestyle) db.close() def test_create_table(self): table = 'test hello world' values = [(2, "World!"), (1, "Hello")] self.create_table(table, "n smallint, t varchar", temporary=True, oids=False, values=values) r = 
self.db.query(f'select t from "{table}" order by n').getresult() r = ', '.join(row[0] for row in r) self.assertEqual(r, "Hello, World!") def test_create_table_with_oids(self): if not self.supports_oids: self.skipTest("database does not support tables with oids") table = 'test hello world' values = [(2, "World!"), (1, "Hello")] self.create_table(table, "n smallint, t varchar", temporary=True, oids=True, values=values) r = self.db.query(f'select t from "{table}" order by n').getresult() r = ', '.join(row[0] for row in r) self.assertEqual(r, "Hello, World!") r = self.db.query(f'select oid from "{table}" limit 1').getresult() self.assertIsInstance(r[0][0], int) def test_query(self): query = self.db.query table = 'test_table' self.create_table(table, "n integer", oids=False) q = "insert into test_table values (1)" r = query(q) self.assertIsInstance(r, str) self.assertEqual(r, '1') q = "insert into test_table select 2" r = query(q) self.assertIsInstance(r, str) self.assertEqual(r, '1') q = "select n from test_table where n>1" r = query(q).getresult() self.assertEqual(len(r), 1) r = r[0] self.assertEqual(len(r), 1) r = r[0] self.assertIsInstance(r, int) self.assertEqual(r, 2) q = "insert into test_table select 3 union select 4 union select 5" r = query(q) self.assertIsInstance(r, str) self.assertEqual(r, '3') q = "update test_table set n=4 where n<5" r = query(q) self.assertIsInstance(r, str) self.assertEqual(r, '4') # noinspection SqlWithoutWhere q = "delete from test_table" r = query(q) self.assertIsInstance(r, str) self.assertEqual(r, '5') def test_query_with_oids(self): if not self.supports_oids: self.skipTest("database does not support tables with oids") query = self.db.query table = 'test_table' self.create_table(table, "n integer", oids=True) q = "insert into test_table values (1)" r = query(q) self.assertIsInstance(r, int) q = "insert into test_table select 2" r = query(q) self.assertIsInstance(r, int) oid = r q = "select oid from test_table where n=2" r = query(q).getresult() self.assertEqual(len(r), 1) r = r[0] self.assertEqual(len(r), 1) r = r[0] self.assertEqual(r, oid) q = "insert into test_table select 3 union select 4 union select 5" r = query(q) self.assertIsInstance(r, str) self.assertEqual(r, '3') q = "update test_table set n=4 where n<5" r = query(q) self.assertIsInstance(r, str) self.assertEqual(r, '4') # noinspection SqlWithoutWhere q = "delete from test_table" r = query(q) self.assertIsInstance(r, str) self.assertEqual(r, '5') def test_multiple_queries(self): self.assertEqual(self.db.query( "create temporary table test_multi (n integer);" "insert into test_multi values (4711);" "select n from test_multi").getresult()[0][0], 4711) def test_query_with_params(self): query = self.db.query self.create_table('test_table', 'n1 integer, n2 integer', oids=False) q = "insert into test_table values ($1, $2)" r = query(q, (1, 2)) self.assertEqual(r, '1') r = query(q, [3, 4]) self.assertEqual(r, '1') r = query(q, [5, 6]) self.assertEqual(r, '1') q = "select * from test_table order by 1, 2" self.assertEqual(query(q).getresult(), [(1, 2), (3, 4), (5, 6)]) q = "select * from test_table where n1=$1 and n2=$2" self.assertEqual(query(q, 3, 4).getresult(), [(3, 4)]) q = "update test_table set n2=$2 where n1=$1" r = query(q, 3, 7) self.assertEqual(r, '1') q = "select * from test_table order by 1, 2" self.assertEqual(query(q).getresult(), [(1, 2), (3, 7), (5, 6)]) q = "delete from test_table where n2!=$1" r = query(q, 4) self.assertEqual(r, '3') def test_empty_query(self): 
self.assertRaises(ValueError, self.db.query, '') def test_query_data_error(self): try: self.db.query("select 1/0") except pg.DataError as error: # noinspection PyUnresolvedReferences self.assertEqual(error.sqlstate, '22012') def test_query_formatted(self): f = self.db.query_formatted t = True if pg.get_bool() else 't' # test with tuple q = f("select %s::int, %s::real, %s::text, %s::bool", (3, 2.5, 'hello', True)) r = q.getresult()[0] self.assertEqual(r, (3, 2.5, 'hello', t)) # test with tuple, inline q = f("select %s, %s, %s, %s", (3, 2.5, 'hello', True), inline=True) r = q.getresult()[0] self.assertEqual(r, (3, 2.5, 'hello', t)) # test with dict q = f("select %(a)s::int, %(b)s::real, %(c)s::text, %(d)s::bool", dict(a=3, b=2.5, c='hello', d=True)) r = q.getresult()[0] self.assertEqual(r, (3, 2.5, 'hello', t)) # test with dict, inline q = f("select %(a)s, %(b)s, %(c)s, %(d)s", dict(a=3, b=2.5, c='hello', d=True), inline=True) r = q.getresult()[0] self.assertEqual(r, (3, 2.5, 'hello', t)) # test with dict and extra values q = f("select %(a)s||%(b)s||%(c)s||%(d)s||'epsilon'", dict(a='alpha', b='beta', c='gamma', d='delta', e='extra')) r = q.getresult()[0][0] self.assertEqual(r, 'alphabetagammadeltaepsilon') def test_query_formatted_with_any(self): f = self.db.query_formatted q = "select 2 = any(%s)" r = f(q, [[1, 3]]).getresult()[0][0] self.assertEqual(r, False if pg.get_bool() else 'f') r = f(q, [[1, 2, 3]]).getresult()[0][0] self.assertEqual(r, True if pg.get_bool() else 't') r = f(q, [[]]).getresult()[0][0] self.assertEqual(r, False if pg.get_bool() else 'f') r = f(q, [[None]]).getresult()[0][0] self.assertIsNone(r) def test_query_formatted_without_params(self): f = self.db.query_formatted q = "select 42" r = f(q).getresult()[0][0] self.assertEqual(r, 42) r = f(q, None).getresult()[0][0] self.assertEqual(r, 42) r = f(q, []).getresult()[0][0] self.assertEqual(r, 42) r = f(q, {}).getresult()[0][0] self.assertEqual(r, 42) def test_prepare(self): p = self.db.prepare self.assertIsNone(p('my query', "select 'hello'")) self.assertIsNone(p('my other query', "select 'world'")) self.assertRaises( pg.ProgrammingError, p, 'my query', "select 'hello, too'") def test_prepare_unnamed(self): p = self.db.prepare self.assertIsNone(p('', "select null")) self.assertIsNone(p(None, "select null")) def test_query_prepared_without_params(self): f = self.db.query_prepared self.assertRaises(pg.OperationalError, f, 'q') p = self.db.prepare p('q1', "select 17") p('q2', "select 42") r = f('q1').getresult()[0][0] self.assertEqual(r, 17) r = f('q2').getresult()[0][0] self.assertEqual(r, 42) def test_query_prepared_with_params(self): p = self.db.prepare p('sum', "select 1 + $1 + $2 + $3") p('cat', "select initcap($1) || ', ' || $2 || '!'") f = self.db.query_prepared r = f('sum', 2, 3, 5).getresult()[0][0] self.assertEqual(r, 11) r = f('cat', 'hello', 'world').getresult()[0][0] self.assertEqual(r, 'Hello, world!') def test_query_prepared_unnamed_without_params(self): f = self.db.query_prepared self.assertRaises(pg.OperationalError, f, None) self.assertRaises(pg.OperationalError, f, '') p = self.db.prepare # make sure all types are known so that we will not # generate other anonymous queries in the background p('', "select 'empty'::varchar") r = f(None).getresult()[0][0] self.assertEqual(r, 'empty') r = f('').getresult()[0][0] self.assertEqual(r, 'empty') p(None, "select 'none'::varchar") r = f(None).getresult()[0][0] self.assertEqual(r, 'none') r = f('').getresult()[0][0] self.assertEqual(r, 'none') def 
test_query_prepared_unnamed_with_params(self): p = self.db.prepare p('', "select 1 + $1 + $2") f = self.db.query_prepared r = f('', 2, 3).getresult()[0][0] self.assertEqual(r, 6) r = f(None, 2, 3).getresult()[0][0] self.assertEqual(r, 6) p(None, "select 2 + $1 + $2") f = self.db.query_prepared r = f('', 3, 4).getresult()[0][0] self.assertEqual(r, 9) r = f(None, 3, 4).getresult()[0][0] self.assertEqual(r, 9) def test_describe_prepared(self): self.db.prepare('count', "select 1 as first, 2 as second") f = self.db.describe_prepared r = f('count').listfields() self.assertEqual(r, ('first', 'second')) def test_describe_prepared_unnamed(self): self.db.prepare('', "select null as anon") f = self.db.describe_prepared r = f().listfields() self.assertEqual(r, ('anon',)) r = f(None).listfields() self.assertEqual(r, ('anon',)) r = f('').listfields() self.assertEqual(r, ('anon',)) def test_delete_prepared(self): f = self.db.delete_prepared f() e = pg.OperationalError self.assertRaises(e, f, 'myquery') p = self.db.prepare p('q1', "select 1") p('q2', "select 2") f('q1') f('q2') self.assertRaises(e, f, 'q1') self.assertRaises(e, f, 'q2') p('q1', "select 1") p('q2', "select 2") f() self.assertRaises(e, f, 'q1') self.assertRaises(e, f, 'q2') def test_pkey(self): query = self.db.query pkey = self.db.pkey self.assertRaises(KeyError, pkey, 'test') for t in ('pkeytest', 'primary key test'): self.create_table(f'{t}0', 'a smallint') self.create_table(f'{t}1', 'b smallint primary key') self.create_table(f'{t}2', 'c smallint, d smallint primary key') self.create_table( f'{t}3', 'e smallint, f smallint, g smallint, h smallint, i smallint,' ' primary key (f, h)') self.create_table( f'{t}4', 'e smallint, f smallint, g smallint, h smallint, i smallint,' ' primary key (h, f)') self.create_table( f'{t}5', 'more_than_one_letter varchar primary key') self.create_table( f'{t}6', '"with space" date primary key') self.create_table( f'{t}7', 'a_very_long_column_name varchar, "with space" date, "42" int,' ' primary key (a_very_long_column_name, "with space", "42")') self.assertRaises(KeyError, pkey, f'{t}0') self.assertEqual(pkey(f'{t}1'), 'b') self.assertEqual(pkey(f'{t}1', True), ('b',)) self.assertEqual(pkey(f'{t}1', composite=False), 'b') self.assertEqual(pkey(f'{t}1', composite=True), ('b',)) self.assertEqual(pkey(f'{t}2'), 'd') self.assertEqual(pkey(f'{t}2', composite=True), ('d',)) r = pkey(f'{t}3') self.assertIsInstance(r, tuple) self.assertEqual(r, ('f', 'h')) r = pkey(f'{t}3', composite=False) self.assertIsInstance(r, tuple) self.assertEqual(r, ('f', 'h')) r = pkey(f'{t}4') self.assertIsInstance(r, tuple) self.assertEqual(r, ('h', 'f')) self.assertEqual(pkey(f'{t}5'), 'more_than_one_letter') self.assertEqual(pkey(f'{t}6'), 'with space') r = pkey(f'{t}7') self.assertIsInstance(r, tuple) self.assertEqual(r, ( 'a_very_long_column_name', 'with space', '42')) # a newly added primary key will be detected query(f'alter table "{t}0" add primary key (a)') self.assertEqual(pkey(f'{t}0'), 'a') # a changed primary key will not be detected, # indicating that the internal cache is operating query(f'alter table "{t}1" rename column b to x') self.assertEqual(pkey(f'{t}1'), 'b') # we get the changed primary key when the cache is flushed self.assertEqual(pkey(f'{t}1', flush=True), 'x') def test_pkeys(self): pkeys = self.db.pkeys t = 'pkeys_test_' self.create_table(f'{t}0', 'a int') self.create_table(f'{t}1', 'a int primary key, b int') self.create_table(f'{t}2', 'a int, b int, c int, primary key (a, c)') self.assertRaises(KeyError, 
pkeys, f'{t}0') self.assertEqual(pkeys(f'{t}1'), ('a',)) self.assertEqual(pkeys(f'{t}2'), ('a', 'c')) def test_get_databases(self): databases = self.db.get_databases() self.assertIn('template0', databases) self.assertIn('template1', databases) self.assertNotIn('not existing database', databases) self.assertIn('postgres', databases) self.assertIn(dbname, databases) def test_get_tables(self): get_tables = self.db.get_tables tables = ('A very Special Name', 'A_MiXeD_quoted_NaMe', 'Hello, Test World!', 'Zoro', 'a1', 'a2', 'a321', 'averyveryveryveryveryveryveryreallyreallylongtablename', 'b0', 'b3', 'x', 'xXx', 'xx', 'y', 'z') for t in tables: self.db.query(f'drop table if exists "{t}" cascade') before_tables = get_tables() self.assertIsInstance(before_tables, list) for t in before_tables: s = t.split('.', 1) self.assertGreaterEqual(len(s), 2) if len(s) > 2: self.assertTrue(s[1].startswith('"')) t = s[0] self.assertNotEqual(t, 'information_schema') self.assertFalse(t.startswith('pg_')) for t in tables: self.create_table(t, 'as select 0', temporary=False) current_tables = get_tables() new_tables = [t for t in current_tables if t not in before_tables] expected_new_tables = ['public.' + ( f'"{t}"' if ' ' in t or t != t.lower() else t) for t in tables] self.assertEqual(new_tables, expected_new_tables) self.doCleanups() after_tables = get_tables() self.assertEqual(after_tables, before_tables) def test_get_system_tables(self): get_tables = self.db.get_tables result = get_tables() self.assertNotIn('pg_catalog.pg_class', result) self.assertNotIn('information_schema.tables', result) result = get_tables(system=False) self.assertNotIn('pg_catalog.pg_class', result) self.assertNotIn('information_schema.tables', result) result = get_tables(system=True) self.assertIn('pg_catalog.pg_class', result) self.assertNotIn('information_schema.tables', result) def test_get_relations(self): get_relations = self.db.get_relations result = get_relations() self.assertIn('public.test', result) self.assertIn('public.test_view', result) result = get_relations('rv') self.assertIn('public.test', result) self.assertIn('public.test_view', result) result = get_relations('r') self.assertIn('public.test', result) self.assertNotIn('public.test_view', result) result = get_relations('v') self.assertNotIn('public.test', result) self.assertIn('public.test_view', result) result = get_relations('cisSt') self.assertNotIn('public.test', result) self.assertNotIn('public.test_view', result) def test_get_system_relations(self): get_relations = self.db.get_relations result = get_relations() self.assertNotIn('pg_catalog.pg_class', result) self.assertNotIn('information_schema.tables', result) result = get_relations(system=False) self.assertNotIn('pg_catalog.pg_class', result) self.assertNotIn('information_schema.tables', result) result = get_relations(system=True) self.assertIn('pg_catalog.pg_class', result) self.assertIn('information_schema.tables', result) def test_get_attnames(self): get_attnames = self.db.get_attnames self.assertRaises(pg.ProgrammingError, self.db.get_attnames, 'does_not_exist') self.assertRaises(pg.ProgrammingError, self.db.get_attnames, 'has.too.many.dots') r = get_attnames('test') self.assertIsInstance(r, dict) if self.regtypes: self.assertEqual(r, dict( i2='smallint', i4='integer', i8='bigint', d='numeric', f4='real', f8='double precision', m='money', v4='character varying', c4='character', t='text')) else: self.assertEqual(r, dict( i2='int', i4='int', i8='int', d='num', f4='float', f8='float', m='money', v4='text', 
c4='text', t='text')) self.create_table('test_table', 'n int, alpha smallint, beta bool,' ' gamma char(5), tau text, v varchar(3)') r = get_attnames('test_table') self.assertIsInstance(r, dict) if self.regtypes: self.assertEqual(r, dict( n='integer', alpha='smallint', beta='boolean', gamma='character', tau='text', v='character varying')) else: self.assertEqual(r, dict( n='int', alpha='int', beta='bool', gamma='text', tau='text', v='text')) def test_get_attnames_with_quotes(self): get_attnames = self.db.get_attnames table = 'test table for get_attnames()' self.create_table( table, '"Prime!" smallint, "much space" integer, "Questions?" text') r = get_attnames(table) self.assertIsInstance(r, dict) if self.regtypes: self.assertEqual(r, { 'Prime!': 'smallint', 'much space': 'integer', 'Questions?': 'text'}) else: self.assertEqual(r, { 'Prime!': 'int', 'much space': 'int', 'Questions?': 'text'}) table = 'yet another test table for get_attnames()' self.create_table(table, 'a smallint, b integer, c bigint,' ' e numeric, f real, f2 double precision, m money,' ' x smallint, y smallint, z smallint,' ' Normal_NaMe smallint, "Special Name" smallint,' ' t text, u char(2), v varchar(2),' ' primary key (y, u)') r = get_attnames(table) self.assertIsInstance(r, dict) if self.regtypes: self.assertEqual(r, { 'a': 'smallint', 'b': 'integer', 'c': 'bigint', 'e': 'numeric', 'f': 'real', 'f2': 'double precision', 'm': 'money', 'normal_name': 'smallint', 'Special Name': 'smallint', 'u': 'character', 't': 'text', 'v': 'character varying', 'y': 'smallint', 'x': 'smallint', 'z': 'smallint'}) else: self.assertEqual(r, { 'a': 'int', 'b': 'int', 'c': 'int', 'e': 'num', 'f': 'float', 'f2': 'float', 'm': 'money', 'normal_name': 'int', 'Special Name': 'int', 'u': 'text', 't': 'text', 'v': 'text', 'y': 'int', 'x': 'int', 'z': 'int'}) def test_get_attnames_with_regtypes(self): get_attnames = self.db.get_attnames self.create_table( 'test_table', 'n int, alpha smallint, beta bool,' ' gamma char(5), tau text, v varchar(3)') use_regtypes = self.db.use_regtypes regtypes = use_regtypes() self.assertEqual(regtypes, self.regtypes) use_regtypes(True) try: r = get_attnames("test_table") self.assertIsInstance(r, dict) finally: use_regtypes(regtypes) self.assertEqual(r, dict( n='integer', alpha='smallint', beta='boolean', gamma='character', tau='text', v='character varying')) def test_get_attnames_without_regtypes(self): get_attnames = self.db.get_attnames self.create_table( 'test_table', 'n int, alpha smallint, beta bool,' ' gamma char(5), tau text, v varchar(3)') use_regtypes = self.db.use_regtypes regtypes = use_regtypes() self.assertEqual(regtypes, self.regtypes) use_regtypes(False) try: r = get_attnames("test_table") self.assertIsInstance(r, dict) finally: use_regtypes(regtypes) self.assertEqual(r, dict( n='int', alpha='int', beta='bool', gamma='text', tau='text', v='text')) def test_get_attnames_is_cached(self): get_attnames = self.db.get_attnames int_type = 'integer' if self.regtypes else 'int' text_type = 'text' query = self.db.query self.create_table('test_table', 'col int') r = get_attnames("test_table") self.assertIsInstance(r, dict) self.assertEqual(r, dict(col=int_type)) query("alter table test_table alter column col type text") query("alter table test_table add column col2 int") r = get_attnames("test_table") self.assertEqual(r, dict(col=int_type)) r = get_attnames("test_table", flush=True) self.assertEqual(r, dict(col=text_type, col2=int_type)) query("alter table test_table drop column col2") r = 
get_attnames("test_table") self.assertEqual(r, dict(col=text_type, col2=int_type)) r = get_attnames("test_table", flush=True) self.assertEqual(r, dict(col=text_type)) query("alter table test_table drop column col") r = get_attnames("test_table") self.assertEqual(r, dict(col=text_type)) r = get_attnames("test_table", flush=True) self.assertEqual(r, dict()) def test_get_attnames_is_ordered(self): get_attnames = self.db.get_attnames r = get_attnames('test', flush=True) self.assertIsInstance(r, dict) if self.regtypes: self.assertEqual(r, { 'i2': 'smallint', 'i4': 'integer', 'i8': 'bigint', 'd': 'numeric', 'f4': 'real', 'f8': 'double precision', 'm': 'money', 'v4': 'character varying', 'c4': 'character', 't': 'text'}) else: self.assertEqual(r, { 'i2': 'int', 'i4': 'int', 'i8': 'int', 'd': 'num', 'f4': 'float', 'f8': 'float', 'm': 'money', 'v4': 'text', 'c4': 'text', 't': 'text'}) r = ' '.join(list(r.keys())) self.assertEqual(r, 'i2 i4 i8 d f4 f8 m v4 c4 t') table = 'test table for get_attnames' self.create_table( table, 'n int, alpha smallint, v varchar(3),' ' gamma char(5), tau text, beta bool') r = get_attnames(table) self.assertIsInstance(r, dict) if self.regtypes: self.assertEqual(r, { 'n': 'integer', 'alpha': 'smallint', 'v': 'character varying', 'gamma': 'character', 'tau': 'text', 'beta': 'boolean'}) else: self.assertEqual(r, { 'n': 'int', 'alpha': 'int', 'v': 'text', 'gamma': 'text', 'tau': 'text', 'beta': 'bool'}) r = ' '.join(list(r.keys())) self.assertEqual(r, 'n alpha v gamma tau beta') def test_get_attnames_is_attr_dict(self): from pg.attrs import AttrDict get_attnames = self.db.get_attnames r = get_attnames('test', flush=True) self.assertIsInstance(r, AttrDict) if self.regtypes: self.assertEqual(r, AttrDict( i2='smallint', i4='integer', i8='bigint', d='numeric', f4='real', f8='double precision', m='money', v4='character varying', c4='character', t='text')) else: self.assertEqual(r, AttrDict( i2='int', i4='int', i8='int', d='num', f4='float', f8='float', m='money', v4='text', c4='text', t='text')) r = ' '.join(list(r.keys())) self.assertEqual(r, 'i2 i4 i8 d f4 f8 m v4 c4 t') table = 'test table for get_attnames' self.create_table( table, 'n int, alpha smallint, v varchar(3),' ' gamma char(5), tau text, beta bool') r = get_attnames(table) self.assertIsInstance(r, AttrDict) if self.regtypes: self.assertEqual(r, AttrDict( n='integer', alpha='smallint', v='character varying', gamma='character', tau='text', beta='boolean')) else: self.assertEqual(r, AttrDict( n='int', alpha='int', v='text', gamma='text', tau='text', beta='bool')) r = ' '.join(list(r.keys())) self.assertEqual(r, 'n alpha v gamma tau beta') def test_get_generated(self): get_generated = self.db.get_generated server_version = self.db.server_version if server_version >= 100000: self.assertRaises(pg.ProgrammingError, self.db.get_generated, 'does_not_exist') self.assertRaises(pg.ProgrammingError, self.db.get_generated, 'has.too.many.dots') r = get_generated('test') self.assertIsInstance(r, frozenset) self.assertFalse(r) if server_version >= 100000: table = 'test_get_generated_1' self.create_table( table, 'i int generated always as identity primary key,' ' j int generated always as identity,' ' k int generated by default as identity,' ' n serial, m int') r = get_generated(table) self.assertIsInstance(r, frozenset) self.assertEqual(r, {'i', 'j'}) if server_version >= 120000: table = 'test_get_generated_2' self.create_table( table, 'n int, m int generated always as (n + 3) stored,' ' i int generated always as identity,' ' j int 
generated by default as identity') r = get_generated(table) self.assertIsInstance(r, frozenset) self.assertEqual(r, {'m', 'i'}) def test_get_generated_is_cached(self): server_version = self.db.server_version if server_version < 100000: self.skipTest("database does not support generated columns") get_generated = self.db.get_generated query = self.db.query table = 'test_get_generated_2' self.create_table(table, 'i int primary key') self.assertFalse(get_generated(table)) query(f'alter table {table} alter column i' ' add generated always as identity') self.assertFalse(get_generated(table)) self.assertEqual(get_generated(table, flush=True), {'i'}) def test_has_table_privilege(self): can = self.db.has_table_privilege self.assertEqual(can('test'), True) self.assertEqual(can('test', 'select'), True) self.assertEqual(can('test', 'SeLeCt'), True) self.assertEqual(can('test', 'SELECT'), True) self.assertEqual(can('test', 'insert'), True) self.assertEqual(can('test', 'update'), True) self.assertEqual(can('test', 'delete'), True) self.assertRaises(pg.DataError, can, 'test', 'foobar') self.assertRaises(pg.ProgrammingError, can, 'table_does_not_exist') r = self.db.query( 'select rolsuper FROM pg_roles' ' where rolname=current_user').getresult()[0][0] if not pg.get_bool(): r = r == 't' if r: self.skipTest('must not be superuser') self.assertEqual(can('pg_views', 'select'), True) self.assertEqual(can('pg_views', 'delete'), False) def test_get(self): get = self.db.get query = self.db.query table = 'get_test_table' self.assertRaises(TypeError, get) self.assertRaises(TypeError, get, table) self.create_table(table, 'n integer, t text', values=enumerate('xyz', start=1)) self.assertRaises(pg.ProgrammingError, get, table, 2) r: Any = get(table, 2, 'n') self.assertIsInstance(r, dict) self.assertEqual(r, dict(n=2, t='y')) r = get(table, 1, 'n') self.assertEqual(r, dict(n=1, t='x')) r = get(table, (3,), ('n',)) self.assertEqual(r, dict(n=3, t='z')) r = get(table, 'y', 't') self.assertEqual(r, dict(n=2, t='y')) self.assertRaises(pg.DatabaseError, get, table, 4) self.assertRaises(pg.DatabaseError, get, table, 4, 'n') self.assertRaises(pg.DatabaseError, get, table, 'y') self.assertRaises(pg.DatabaseError, get, table, 2, 't') s: dict = dict(n=3) self.assertRaises(pg.ProgrammingError, get, table, s) r = get(table, s, 'n') self.assertIs(r, s) self.assertEqual(r, dict(n=3, t='z')) s.update(t='x') r = get(table, s, 't') self.assertIs(r, s) self.assertEqual(s, dict(n=1, t='x')) r = get(table, s, ('n', 't')) self.assertIs(r, s) self.assertEqual(r, dict(n=1, t='x')) query(f'alter table "{table}" alter n set not null') query(f'alter table "{table}" add primary key (n)') r = get(table, 2) self.assertIsInstance(r, dict) self.assertEqual(r, dict(n=2, t='y')) self.assertEqual(get(table, 1)['t'], 'x') self.assertEqual(get(table, 3)['t'], 'z') self.assertEqual(get(table + '*', 2)['t'], 'y') self.assertEqual(get(table + ' *', 2)['t'], 'y') self.assertRaises(KeyError, get, table, (2, 2)) s = dict(n=3) r = get(table, s) self.assertIs(r, s) self.assertEqual(r, dict(n=3, t='z')) s.update(n=1) self.assertEqual(get(table, s)['t'], 'x') s.update(n=2) self.assertEqual(get(table, r)['t'], 'y') s.pop('n') self.assertRaises(KeyError, get, table, s) def test_get_with_oids(self): if not self.supports_oids: self.skipTest("database does not support tables with oids") get = self.db.get query = self.db.query table = 'get_with_oid_test_table' self.create_table(table, 'n integer, t text', oids=True, values=enumerate('xyz', start=1)) 
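# --- Editorial sketch (added comment, not part of the original test suite) ---
# DB.get, exercised at length in test_get above, fetches a single row as a
# dict: with only a value it looks the row up by the table's primary key,
# and an explicit keyname selects another column; passing a dict updates and
# returns that same dict object. Example against the fixture from test_get:
#
#     row = db.get('get_test_table', 2)          # by primary key n
#     row = db.get('get_test_table', 'y', 't')   # by column t instead
#     row['n'], row['t']                         # -> (2, 'y')
# -----------------------------------------------------------------------------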
self.assertRaises(pg.ProgrammingError, get, table, 2) self.assertRaises(KeyError, get, table, {}, 'oid') r = get(table, 2, 'n') qoid = f'oid({table})' self.assertIn(qoid, r) oid = r[qoid] self.assertIsInstance(oid, int) result = {'t': 'y', 'n': 2, qoid: oid} self.assertEqual(r, result) r = get(table, oid, 'oid') self.assertEqual(r, result) r = get(table, dict(oid=oid)) self.assertEqual(r, result) r = get(table, dict(oid=oid), 'oid') self.assertEqual(r, result) r = get(table, {qoid: oid}) self.assertEqual(r, result) r = get(table, {qoid: oid}, 'oid') self.assertEqual(r, result) self.assertEqual(get(table + '*', 2, 'n'), r) self.assertEqual(get(table + ' *', 2, 'n'), r) self.assertEqual(get(table, oid, 'oid')['t'], 'y') self.assertEqual(get(table, 1, 'n')['t'], 'x') self.assertEqual(get(table, 3, 'n')['t'], 'z') self.assertEqual(get(table, 2, 'n')['t'], 'y') self.assertRaises(pg.DatabaseError, get, table, 4, 'n') r['n'] = 3 self.assertEqual(get(table, r, 'n')['t'], 'z') self.assertEqual(get(table, 1, 'n')['t'], 'x') self.assertEqual(get(table, r, 'oid')['t'], 'z') query(f'alter table "{table}" alter n set not null') query(f'alter table "{table}" add primary key (n)') self.assertEqual(get(table, 3)['t'], 'z') self.assertEqual(get(table, 1)['t'], 'x') self.assertEqual(get(table, 2)['t'], 'y') r['n'] = 1 self.assertEqual(get(table, r)['t'], 'x') r['n'] = 3 self.assertEqual(get(table, r)['t'], 'z') r['n'] = 2 self.assertEqual(get(table, r)['t'], 'y') r = get(table, oid, 'oid') self.assertEqual(r, result) r = get(table, dict(oid=oid)) self.assertEqual(r, result) r = get(table, dict(oid=oid), 'oid') self.assertEqual(r, result) r = get(table, {qoid: oid}) self.assertEqual(r, result) r = get(table, {qoid: oid}, 'oid') self.assertEqual(r, result) r = get(table, dict(oid=oid, n=1)) self.assertEqual(r['n'], 1) self.assertNotEqual(r[qoid], oid) r = get(table, dict(oid=oid, t='z'), 't') self.assertEqual(r['n'], 3) self.assertNotEqual(r[qoid], oid) def test_get_with_composite_key(self): get = self.db.get table = 'get_test_table_1' self.create_table( table, 'n integer primary key, t text', values=enumerate('abc', start=1)) self.assertEqual(get(table, 2)['t'], 'b') self.assertEqual(get(table, 1, 'n')['t'], 'a') self.assertEqual(get(table, 2, ('n',))['t'], 'b') self.assertEqual(get(table, 3, ['n'])['t'], 'c') self.assertEqual(get(table, (2,), ('n',))['t'], 'b') self.assertEqual(get(table, 'b', 't')['n'], 2) self.assertEqual(get(table, ('a',), ('t',))['n'], 1) self.assertEqual(get(table, ['c'], ['t'])['n'], 3) table = 'get_test_table_2' self.create_table( table, 'n integer, m integer, t text, primary key (n, m)', values=[(n + 1, m + 1, chr(ord('a') + 2 * n + m)) for n in range(3) for m in range(2)]) self.assertRaises(KeyError, get, table, 2) self.assertEqual(get(table, (1, 1))['t'], 'a') self.assertEqual(get(table, (1, 2))['t'], 'b') self.assertEqual(get(table, (2, 1))['t'], 'c') self.assertEqual(get(table, (1, 2), ('n', 'm'))['t'], 'b') self.assertEqual(get(table, (1, 2), ('m', 'n'))['t'], 'c') self.assertEqual(get(table, (3, 1), ('n', 'm'))['t'], 'e') self.assertEqual(get(table, (1, 3), ('m', 'n'))['t'], 'e') self.assertEqual(get(table, dict(n=2, m=2))['t'], 'd') self.assertEqual(get(table, dict(n=1, m=2), ('n', 'm'))['t'], 'b') self.assertEqual(get(table, dict(n=2, m=1), ['n', 'm'])['t'], 'c') self.assertEqual(get(table, dict(n=3, m=2), ('m', 'n'))['t'], 'f') def test_get_with_quoted_names(self): get = self.db.get table = 'test table for get()' self.create_table( table, '"Prime!" 
smallint primary key,' ' "much space" integer, "Questions?" text', values=[(17, 1001, 'No!')]) r = get(table, 17) self.assertIsInstance(r, dict) self.assertEqual(r['Prime!'], 17) self.assertEqual(r['much space'], 1001) self.assertEqual(r['Questions?'], 'No!') def test_get_from_view(self): self.db.query('delete from test where i4=14') self.db.query('insert into test (i4, v4) values(' "14, 'abc4')") r = self.db.get('test_view', 14, 'i4') self.assertIn('v4', r) self.assertEqual(r['v4'], 'abc4') def test_get_little_bobby_tables(self): get = self.db.get query = self.db.query self.create_table( 'test_students', 'firstname varchar primary key, nickname varchar, grade char(2)', values=[("D'Arcy", 'Darcey', 'A+'), ('Sheldon', 'Moonpie', 'A+'), ('Robert', 'Little Bobby Tables', 'D-')]) r = get('test_students', 'Sheldon') self.assertEqual(r, dict( firstname="Sheldon", nickname='Moonpie', grade='A+')) r = get('test_students', 'Robert') self.assertEqual(r, dict( firstname="Robert", nickname='Little Bobby Tables', grade='D-')) r = get('test_students', "D'Arcy") self.assertEqual(r, dict( firstname="D'Arcy", nickname='Darcey', grade='A+')) try: get('test_students', "D' Arcy") except pg.DatabaseError as error: self.assertEqual( str(error), 'No such record in test_students\nwhere "firstname" = $1\n' 'with $1="D\' Arcy"') try: get('test_students', "Robert'); TRUNCATE TABLE test_students;--") except pg.DatabaseError as error: self.assertEqual( str(error), 'No such record in test_students\nwhere "firstname" = $1\n' 'with $1="Robert\'); TRUNCATE TABLE test_students;--"') q = "select * from test_students order by 1 limit 4" r = query(q).getresult() self.assertEqual(len(r), 3) self.assertEqual(r[1][2], 'D-') def test_insert(self): insert = self.db.insert query = self.db.query bool_on = pg.get_bool() decimal = pg.get_decimal() table = 'insert_test_table' self.create_table( table, 'i2 smallint, i4 integer, i8 bigint,' ' d numeric, f4 real, f8 double precision, m money,' ' v4 varchar(4), c4 char(4), t text,' ' b boolean, ts timestamp') tests: list[dict | tuple[dict, dict]] = [ dict(i2=None, i4=None, i8=None), (dict(i2='', i4='', i8=''), dict(i2=None, i4=None, i8=None)), (dict(i2=0, i4=0, i8=0), dict(i2=0, i4=0, i8=0)), dict(i2=42, i4=123456, i8=9876543210), dict(i2=2 ** 15 - 1, i4=2 ** 31 - 1, i8=2 ** 63 - 1), dict(d=None), (dict(d=''), dict(d=None)), dict(d=Decimal(0)), (dict(d=0), dict(d=Decimal(0))), dict(f4=None, f8=None), dict(f4=0, f8=0), (dict(f4='', f8=''), dict(f4=None, f8=None)), (dict(d=1234.5, f4=1234.5, f8=1234.5), dict(d=Decimal('1234.5'))), dict(d=Decimal('123.456789'), f4=12.375, f8=123.4921875), dict(d=Decimal('123456789.9876543212345678987654321')), dict(m=None), (dict(m=''), dict(m=None)), dict(m=Decimal('-1234.56')), (dict(m='-1234.56'), dict(m=Decimal('-1234.56'))), dict(m=Decimal('1234.56')), dict(m=Decimal('123456')), (dict(m='1234.56'), dict(m=Decimal('1234.56'))), (dict(m=1234.5), dict(m=Decimal('1234.5'))), (dict(m=-1234.5), dict(m=Decimal('-1234.5'))), (dict(m=123456), dict(m=Decimal('123456'))), (dict(m='1234567.89'), dict(m=Decimal('1234567.89'))), dict(b=None), (dict(b=''), dict(b=None)), dict(b='f'), dict(b='t'), (dict(b=0), dict(b='f')), (dict(b=1), dict(b='t')), (dict(b=False), dict(b='f')), (dict(b=True), dict(b='t')), (dict(b='0'), dict(b='f')), (dict(b='1'), dict(b='t')), (dict(b='n'), dict(b='f')), (dict(b='y'), dict(b='t')), (dict(b='no'), dict(b='f')), (dict(b='yes'), dict(b='t')), (dict(b='off'), dict(b='f')), (dict(b='on'), dict(b='t')), dict(v4=None, c4=None, t=None), 
(dict(v4='', c4='', t=''), dict(c4=' ' * 4)), dict(v4='1234', c4='1234', t='1234' * 10), dict(v4='abcd', c4='abcd', t='abcdefg'), (dict(v4='abc', c4='abc', t='abc'), dict(c4='abc ')), dict(ts=None), (dict(ts=''), dict(ts=None)), (dict(ts=0), dict(ts=None)), (dict(ts=False), dict(ts=None)), dict(ts='2012-12-21 00:00:00'), (dict(ts='2012-12-21'), dict(ts='2012-12-21 00:00:00')), dict(ts='2012-12-21 12:21:12'), dict(ts='2013-01-05 12:13:14'), dict(ts='current_timestamp')] for test in tests: if isinstance(test, dict): data: dict = test change: dict = {} else: data, change = test expect = data.copy() expect.update(change) if bool_on: b = expect.get('b') if b is not None: expect['b'] = b == 't' if decimal is not Decimal: d = expect.get('d') if d is not None: expect['d'] = decimal(d) m = expect.get('m') if m is not None: expect['m'] = decimal(m) self.assertEqual(insert(table, data), data) data = dict(item for item in data.items() if item[0] in expect) ts = expect.get('ts') if ts: if ts == 'current_timestamp': ts = data['ts'] self.assertIsInstance(ts, datetime) self.assertEqual( ts.strftime('%Y-%m-%d'), strftime('%Y-%m-%d')) else: ts = datetime.strptime(ts, '%Y-%m-%d %H:%M:%S') expect['ts'] = ts self.assertEqual(data, expect) data = query(f'select * from "{table}"').dictresult()[0] data = dict(item for item in data.items() if item[0] in expect) self.assertEqual(data, expect) query(f'truncate table "{table}"') def test_insert_with_oids(self): if not self.supports_oids: self.skipTest("database does not support tables with oids") insert = self.db.insert query = self.db.query self.create_table('test_table', 'n int', oids=True) self.assertRaises(pg.ProgrammingError, insert, 'test_table', m=1) r = insert('test_table', n=1) self.assertIsInstance(r, dict) self.assertEqual(r['n'], 1) self.assertNotIn('oid', r) qoid = 'oid(test_table)' self.assertIn(qoid, r) oid = r[qoid] self.assertEqual(sorted(r.keys()), ['n', qoid]) r = insert('test_table', n=2, oid=oid) self.assertIsInstance(r, dict) self.assertEqual(r['n'], 2) self.assertIn(qoid, r) self.assertNotEqual(r[qoid], oid) self.assertNotIn('oid', r) r = insert('test_table', None, n=3) self.assertIsInstance(r, dict) self.assertEqual(r['n'], 3) s = r r = insert('test_table', r) self.assertIs(r, s) self.assertEqual(r['n'], 3) r = insert('test_table *', r) self.assertIs(r, s) self.assertEqual(r['n'], 3) r = insert('test_table', r, n=4) self.assertIs(r, s) self.assertEqual(r['n'], 4) self.assertNotIn('oid', r) self.assertIn(qoid, r) oid = r[qoid] r = insert('test_table', r, n=5, oid=oid) self.assertIs(r, s) self.assertEqual(r['n'], 5) self.assertIn(qoid, r) self.assertNotEqual(r[qoid], oid) self.assertNotIn('oid', r) r['oid'] = oid = r[qoid] r = insert('test_table', r, n=6) self.assertIs(r, s) self.assertEqual(r['n'], 6) self.assertIn(qoid, r) self.assertNotEqual(r[qoid], oid) self.assertNotIn('oid', r) q = 'select n from test_table order by 1 limit 9' r = ' '.join(str(row[0]) for row in query(q).getresult()) self.assertEqual(r, '1 2 3 3 3 4 5 6') query("truncate table test_table") query("alter table test_table add unique (n)") r = insert('test_table', dict(n=7)) self.assertIsInstance(r, dict) self.assertEqual(r['n'], 7) self.assertRaises(pg.IntegrityError, insert, 'test_table', r) r['n'] = 6 self.assertRaises(pg.IntegrityError, insert, 'test_table', r, n=7) self.assertIsInstance(r, dict) self.assertEqual(r['n'], 7) r['n'] = 6 r = insert('test_table', r) self.assertIsInstance(r, dict) self.assertEqual(r['n'], 6) r = ' '.join(str(row[0]) for row in 
query(q).getresult()) self.assertEqual(r, '6 7') def test_insert_with_quoted_names(self): insert = self.db.insert query = self.db.query table = 'test table for insert()' self.create_table(table, '"Prime!" smallint primary key,' ' "much space" integer, "Questions?" text') r: Any = {'Prime!': 11, 'much space': 2002, 'Questions?': 'What?'} r = insert(table, r) self.assertIsInstance(r, dict) self.assertEqual(r['Prime!'], 11) self.assertEqual(r['much space'], 2002) self.assertEqual(r['Questions?'], 'What?') r = query(f'select * from "{table}" limit 2').dictresult() self.assertEqual(len(r), 1) r = r[0] self.assertEqual(r['Prime!'], 11) self.assertEqual(r['much space'], 2002) self.assertEqual(r['Questions?'], 'What?') def test_insert_into_view(self): insert = self.db.insert query = self.db.query query("truncate table test") q = 'select * from test_view order by i4 limit 3' r: Any = query(q).getresult() self.assertEqual(r, []) r = dict(i4=1234, v4='abcd') insert('test', r) self.assertIsNone(r['i2']) self.assertEqual(r['i4'], 1234) self.assertIsNone(r['i8']) self.assertEqual(r['v4'], 'abcd') self.assertIsNone(r['c4']) r = query(q).getresult() self.assertEqual(r, [(1234, 'abcd')]) r = dict(i4=5678, v4='efgh') insert('test_view', r) self.assertNotIn('i2', r) self.assertEqual(r['i4'], 5678) self.assertNotIn('i8', r) self.assertEqual(r['v4'], 'efgh') self.assertNotIn('c4', r) r = query(q).getresult() self.assertEqual(r, [(1234, 'abcd'), (5678, 'efgh')]) def test_insert_with_generated_columns(self): insert = self.db.insert get = self.db.get server_version = self.db.server_version table = 'insert_test_table_2' table_def = 'i int not null' if server_version >= 100000: table_def += ( ', a int generated always as identity' ', d int generated by default as identity primary key') else: table_def += ', a int not null default 1, d int primary key' if server_version >= 120000: table_def += ', j int generated always as (i + 7) stored' else: table_def += ', j int not null default 42' self.create_table(table, table_def) i, d = 35, 1001 j = i + 7 r = insert(table, {'i': i, 'd': d, 'a': 1, 'j': j}) self.assertIsInstance(r, dict) self.assertEqual(r, {'a': 1, 'd': d, 'i': i, 'j': j}) r = get(table, d) self.assertIsInstance(r, dict) self.assertEqual(r, {'a': 1, 'd': d, 'i': i, 'j': j}) def test_update(self): update = self.db.update query = self.db.query self.assertRaises(pg.ProgrammingError, update, 'test', i2=2, i4=4, i8=8) table = 'update_test_table' self.create_table(table, 'n integer primary key, t text', values=enumerate('xyz', start=1)) self.assertRaises(pg.DatabaseError, self.db.get, table, 4) r = self.db.get(table, 2) r['t'] = 'u' s = update(table, r) self.assertEqual(s, r) q = f'select t from "{table}" where n=2' r = query(q).getresult()[0][0] self.assertEqual(r, 'u') def test_update_with_oids(self): if not self.supports_oids: self.skipTest("database does not support tables with oids") update = self.db.update get = self.db.get query = self.db.query self.create_table('test_table', 'n int', oids=True, values=[1]) s = get('test_table', 1, 'n') self.assertIsInstance(s, dict) self.assertEqual(s['n'], 1) s['n'] = 2 r = update('test_table', s) self.assertIs(r, s) self.assertEqual(r['n'], 2) qoid = 'oid(test_table)' self.assertIn(qoid, r) self.assertNotIn('oid', r) self.assertEqual(sorted(r.keys()), ['n', qoid]) r['n'] = 3 oid = r.pop(qoid) r = update('test_table', r, oid=oid) self.assertIs(r, s) self.assertEqual(r['n'], 3) r.pop(qoid) self.assertRaises(pg.ProgrammingError, update, 'test_table', r) s = 
get('test_table', 3, 'n') self.assertIsInstance(s, dict) self.assertEqual(s['n'], 3) s.pop('n') r = update('test_table', s) oid = r.pop(qoid) self.assertEqual(r, {}) q = "select n from test_table limit 2" r = query(q).getresult() self.assertEqual(r, [(3,)]) query("insert into test_table values (1)") self.assertRaises(pg.ProgrammingError, update, 'test_table', dict(oid=oid, n=4)) r = update('test_table', dict(n=4), oid=oid) self.assertEqual(r['n'], 4) r = update('test_table *', dict(n=5), oid=oid) self.assertEqual(r['n'], 5) query("alter table test_table add column m int") query("alter table test_table add primary key (n)") self.assertIn('m', self.db.get_attnames('test_table', flush=True)) self.assertEqual('n', self.db.pkey('test_table', flush=True)) s = dict(n=1, m=4) r = update('test_table', s) self.assertIs(r, s) self.assertEqual(r['n'], 1) self.assertEqual(r['m'], 4) s = dict(m=7) r = update('test_table', s, n=5) self.assertIs(r, s) self.assertEqual(r['n'], 5) self.assertEqual(r['m'], 7) q = "select n, m from test_table order by 1 limit 3" r = query(q).getresult() self.assertEqual(r, [(1, 4), (5, 7)]) s = dict(m=9, oid=oid) self.assertRaises(KeyError, update, 'test_table', s) r = update('test_table', s, oid=oid) self.assertIs(r, s) self.assertEqual(r['n'], 5) self.assertEqual(r['m'], 9) s = dict(n=1, m=3, oid=oid) r = update('test_table', s) self.assertIs(r, s) self.assertEqual(r['n'], 1) self.assertEqual(r['m'], 3) r = query(q).getresult() self.assertEqual(r, [(1, 3), (5, 9)]) s.update(n=4, m=7) r = update('test_table', s, oid=oid) self.assertIs(r, s) self.assertEqual(r['n'], 4) self.assertEqual(r['m'], 7) r = query(q).getresult() self.assertEqual(r, [(1, 3), (4, 7)]) def test_update_without_oid(self): update = self.db.update query = self.db.query self.assertRaises(pg.ProgrammingError, update, 'test', i2=2, i4=4, i8=8) table = 'update_test_table' self.create_table(table, 'n integer primary key, t text', oids=False, values=enumerate('xyz', start=1)) r = self.db.get(table, 2) r['t'] = 'u' s = update(table, r) self.assertEqual(s, r) q = f'select t from "{table}" where n=2' r = query(q).getresult()[0][0] self.assertEqual(r, 'u') def test_update_with_composite_key(self): update = self.db.update query = self.db.query table = 'update_test_table_1' self.create_table(table, 'n integer primary key, t text', values=enumerate('abc', start=1)) self.assertRaises(KeyError, update, table, dict(t='b')) s = dict(n=2, t='d') r = update(table, s) self.assertIs(r, s) self.assertEqual(r['n'], 2) self.assertEqual(r['t'], 'd') q = f'select t from "{table}" where n=2' r = query(q).getresult()[0][0] self.assertEqual(r, 'd') s.update(dict(n=4, t='e')) r = update(table, s) self.assertEqual(r['n'], 4) self.assertEqual(r['t'], 'e') q = f'select t from "{table}" where n=2' r = query(q).getresult()[0][0] self.assertEqual(r, 'd') q = f'select t from "{table}" where n=4' r = query(q).getresult() self.assertEqual(len(r), 0) query(f'drop table "{table}"') table = 'update_test_table_2' self.create_table(table, 'n integer, m integer, t text, primary key (n, m)', values=[(n + 1, m + 1, chr(ord('a') + 2 * n + m)) for n in range(3) for m in range(2)]) self.assertRaises(KeyError, update, table, dict(n=2, t='b')) self.assertEqual(update(table, dict(n=2, m=2, t='x'))['t'], 'x') q = f'select t from "{table}" where n=2 order by m' r = [r[0] for r in query(q).getresult()] self.assertEqual(r, ['c', 'x']) def test_update_with_quoted_names(self): update = self.db.update query = self.db.query table = 'test table for update()' 
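# --- Editorial note (added comment, not part of the original test suite) ---
# Identifiers with spaces, punctuation or mixed case, as used in this test,
# must be double-quoted in SQL. The DB wrapper methods quote them
# automatically; for hand-written queries, escape_identifier (tested earlier)
# produces the properly quoted form:
#
#     db.escape_identifier('much space')   # -> '"much space"'
#     db.escape_identifier('Prime!')       # -> '"Prime!"'
# -----------------------------------------------------------------------------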
self.create_table(table, '"Prime!" smallint primary key,' ' "much space" integer, "Questions?" text', values=[(13, 3003, 'Why!')]) r: Any = {'Prime!': 13, 'much space': 7007, 'Questions?': 'When?'} r = update(table, r) self.assertIsInstance(r, dict) self.assertEqual(r['Prime!'], 13) self.assertEqual(r['much space'], 7007) self.assertEqual(r['Questions?'], 'When?') r = query(f'select * from "{table}" limit 2').dictresult() self.assertEqual(len(r), 1) r = r[0] self.assertEqual(r['Prime!'], 13) self.assertEqual(r['much space'], 7007) self.assertEqual(r['Questions?'], 'When?') def test_update_with_generated_columns(self): update = self.db.update get = self.db.get query = self.db.query server_version = self.db.server_version table = 'update_test_table_2' table_def = 'i int not null' if server_version >= 100000: table_def += ( ', a int generated always as identity' ', d int generated by default as identity primary key') else: table_def += ', a int not null default 1, d int primary key' if server_version >= 120000: table_def += ', j int generated always as (i + 7) stored' else: table_def += ', j int not null default 42' self.create_table(table, table_def) i, d = 35, 1001 j = i + 7 r: Any = query(f'insert into {table} (i, d) values ({i}, {d})') self.assertEqual(r, '1') r = get(table, d) self.assertIsInstance(r, dict) self.assertEqual(r, {'a': 1, 'd': d, 'i': i, 'j': j}) r['i'] += 1 r = update(table, r) i += 1 if server_version >= 120000: j += 1 self.assertEqual(r, {'a': 1, 'd': d, 'i': i, 'j': j}) def test_upsert(self): upsert = self.db.upsert query = self.db.query self.assertRaises(pg.ProgrammingError, upsert, 'test', i2=2, i4=4, i8=8) table = 'upsert_test_table' self.create_table(table, 'n integer primary key, t text') s: dict = dict(n=1, t='x') r: Any = upsert(table, s) self.assertIs(r, s) self.assertEqual(r['n'], 1) self.assertEqual(r['t'], 'x') s.update(n=2, t='y') r = upsert(table, s, **dict.fromkeys(s)) self.assertIs(r, s) self.assertEqual(r['n'], 2) self.assertEqual(r['t'], 'y') q = f'select n, t from "{table}" order by n limit 3' r = query(q).getresult() self.assertEqual(r, [(1, 'x'), (2, 'y')]) s.update(t='z') r = upsert(table, s) self.assertIs(r, s) self.assertEqual(r['n'], 2) self.assertEqual(r['t'], 'z') r = query(q).getresult() self.assertEqual(r, [(1, 'x'), (2, 'z')]) s.update(t='n') r = upsert(table, s, t=False) self.assertIs(r, s) self.assertEqual(r['n'], 2) self.assertEqual(r['t'], 'z') r = query(q).getresult() self.assertEqual(r, [(1, 'x'), (2, 'z')]) s.update(t='y') r = upsert(table, s, t=True) self.assertIs(r, s) self.assertEqual(r['n'], 2) self.assertEqual(r['t'], 'y') r = query(q).getresult() self.assertEqual(r, [(1, 'x'), (2, 'y')]) s.update(t='n') r = upsert(table, s, t="included.t || '2'") self.assertIs(r, s) self.assertEqual(r['n'], 2) self.assertEqual(r['t'], 'y2') r = query(q).getresult() self.assertEqual(r, [(1, 'x'), (2, 'y2')]) s.update(t='y') r = upsert(table, s, t="excluded.t || '3'") self.assertIs(r, s) self.assertEqual(r['n'], 2) self.assertEqual(r['t'], 'y3') r = query(q).getresult() self.assertEqual(r, [(1, 'x'), (2, 'y3')]) s.update(n=1, t='2') r = upsert(table, s, t="included.t || excluded.t") self.assertIs(r, s) self.assertEqual(r['n'], 1) self.assertEqual(r['t'], 'x2') r = query(q).getresult() self.assertEqual(r, [(1, 'x2'), (2, 'y3')]) # not existing columns and oid parameter should be ignored s = dict(m=3, u='z') r = upsert(table, s, oid='invalid') self.assertIs(r, s) s = dict(n=2) # do not modify columns missing in the dict r = upsert(table, s) 
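# --- Editorial sketch (added comment, not part of the original test suite) ---
# Summary of the DB.upsert behaviour asserted above: the row is inserted or,
# on a primary key conflict, updated. Keyword arguments control the update:
# col=True overwrites with the new value, col=False keeps the stored value,
# and a string is used as a raw SQL expression in which 'included.col' refers
# to the row already in the table and 'excluded.col' to the new row:
#
#     db.upsert('upsert_test_table', dict(n=2, t='y'))           # plain upsert
#     db.upsert('upsert_test_table', dict(n=2, t='n'), t=False)  # keep old t
#     db.upsert('upsert_test_table', dict(n=2, t='y'),
#               t="included.t || excluded.t")                    # combine both
# -----------------------------------------------------------------------------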
self.assertIs(r, s) r = query(q).getresult() self.assertEqual(r, [(1, 'x2'), (2, 'y3')]) def test_upsert_with_oids(self): if not self.supports_oids: self.skipTest("database does not support tables with oids") upsert = self.db.upsert get = self.db.get query = self.db.query self.create_table('test_table', 'n int', oids=True, values=[1]) self.assertRaises(pg.ProgrammingError, upsert, 'test_table', dict(n=2)) r: Any = get('test_table', 1, 'n') self.assertIsInstance(r, dict) self.assertEqual(r['n'], 1) qoid = 'oid(test_table)' self.assertIn(qoid, r) self.assertNotIn('oid', r) oid = r[qoid] self.assertRaises(pg.ProgrammingError, upsert, 'test_table', dict(n=2, oid=oid)) query("alter table test_table add column m int") query("alter table test_table add primary key (n)") self.assertIn('m', self.db.get_attnames('test_table', flush=True)) self.assertEqual('n', self.db.pkey('test_table', flush=True)) s = dict(n=2) r = upsert('test_table', s) self.assertIs(r, s) self.assertEqual(r['n'], 2) self.assertIsNone(r['m']) q = query("select n, m from test_table order by n limit 3") self.assertEqual(q.getresult(), [(1, None), (2, None)]) r['oid'] = oid r = upsert('test_table', r) self.assertIs(r, s) self.assertEqual(r['n'], 2) self.assertIsNone(r['m']) self.assertIn(qoid, r) self.assertNotIn('oid', r) self.assertNotEqual(r[qoid], oid) r['m'] = 7 r = upsert('test_table', r) self.assertIs(r, s) self.assertEqual(r['n'], 2) self.assertEqual(r['m'], 7) r.update(n=1, m=3) r = upsert('test_table', r) self.assertIs(r, s) self.assertEqual(r['n'], 1) self.assertEqual(r['m'], 3) q = query("select n, m from test_table order by n limit 3") self.assertEqual(q.getresult(), [(1, 3), (2, 7)]) r = upsert('test_table', r, oid='invalid') self.assertIs(r, s) self.assertEqual(r['n'], 1) self.assertEqual(r['m'], 3) r['m'] = 5 r = upsert('test_table', r, m=False) self.assertIs(r, s) self.assertEqual(r['n'], 1) self.assertEqual(r['m'], 3) r['m'] = 5 r = upsert('test_table', r, m=True) self.assertIs(r, s) self.assertEqual(r['n'], 1) self.assertEqual(r['m'], 5) r.update(n=2, m=1) r = upsert('test_table', r, m='included.m') self.assertIs(r, s) self.assertEqual(r['n'], 2) self.assertEqual(r['m'], 7) r['m'] = 9 r = upsert('test_table', r, m='excluded.m') self.assertIs(r, s) self.assertEqual(r['n'], 2) self.assertEqual(r['m'], 9) r['m'] = 8 r = upsert('test_table *', r, m='included.m + 1') self.assertIs(r, s) self.assertEqual(r['n'], 2) self.assertEqual(r['m'], 10) q = query("select n, m from test_table order by n limit 3") self.assertEqual(q.getresult(), [(1, 5), (2, 10)]) def test_upsert_with_composite_key(self): upsert = self.db.upsert query = self.db.query table = 'upsert_test_table_2' self.create_table( table, 'n integer, m integer, t text, primary key (n, m)') s: dict = dict(n=1, m=2, t='x') r: Any = upsert(table, s) self.assertIs(r, s) self.assertEqual(r['n'], 1) self.assertEqual(r['m'], 2) self.assertEqual(r['t'], 'x') s.update(m=3, t='y') r = upsert(table, s, **dict.fromkeys(s)) self.assertIs(r, s) self.assertEqual(r['n'], 1) self.assertEqual(r['m'], 3) self.assertEqual(r['t'], 'y') q = f'select n, m, t from "{table}" order by n, m limit 3' r = query(q).getresult() self.assertEqual(r, [(1, 2, 'x'), (1, 3, 'y')]) s.update(t='z') r = upsert(table, s) self.assertIs(r, s) self.assertEqual(r['n'], 1) self.assertEqual(r['m'], 3) self.assertEqual(r['t'], 'z') r = query(q).getresult() self.assertEqual(r, [(1, 2, 'x'), (1, 3, 'z')]) s.update(t='n') r = upsert(table, s, t=False) self.assertIs(r, s) self.assertEqual(r['n'], 1) 
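        # t=False requests that column t is left untouched on conflict,
        # so the previously stored value 'z' must still be there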
self.assertEqual(r['m'], 3) self.assertEqual(r['t'], 'z') r = query(q).getresult() self.assertEqual(r, [(1, 2, 'x'), (1, 3, 'z')]) s.update(t='n') r = upsert(table, s, t=True) self.assertIs(r, s) self.assertEqual(r['n'], 1) self.assertEqual(r['m'], 3) self.assertEqual(r['t'], 'n') r = query(q).getresult() self.assertEqual(r, [(1, 2, 'x'), (1, 3, 'n')]) s.update(n=2, t='y') r = upsert(table, s, t="'z'") self.assertIs(r, s) self.assertEqual(r['n'], 2) self.assertEqual(r['m'], 3) self.assertEqual(r['t'], 'y') r = query(q).getresult() self.assertEqual(r, [(1, 2, 'x'), (1, 3, 'n'), (2, 3, 'y')]) s.update(n=1, t='m') r = upsert(table, s, t='included.t || excluded.t') self.assertIs(r, s) self.assertEqual(r['n'], 1) self.assertEqual(r['m'], 3) self.assertEqual(r['t'], 'nm') r = query(q).getresult() self.assertEqual(r, [(1, 2, 'x'), (1, 3, 'nm'), (2, 3, 'y')]) def test_upsert_with_quoted_names(self): upsert = self.db.upsert query = self.db.query table = 'test table for upsert()' self.create_table(table, '"Prime!" smallint primary key,' ' "much space" integer, "Questions?" text') s: dict = {'Prime!': 31, 'much space': 9009, 'Questions?': 'Yes.'} r: Any = upsert(table, s) self.assertIs(r, s) self.assertEqual(r['Prime!'], 31) self.assertEqual(r['much space'], 9009) self.assertEqual(r['Questions?'], 'Yes.') q = f'select * from "{table}" limit 2' r = query(q).getresult() self.assertEqual(r, [(31, 9009, 'Yes.')]) s.update({'Questions?': 'No.'}) r = upsert(table, s) self.assertIs(r, s) self.assertEqual(r['Prime!'], 31) self.assertEqual(r['much space'], 9009) self.assertEqual(r['Questions?'], 'No.') r = query(q).getresult() self.assertEqual(r, [(31, 9009, 'No.')]) def test_upsert_with_generated_columns(self): upsert = self.db.upsert get = self.db.get server_version = self.db.server_version table = 'upsert_test_table_2' table_def = 'i int not null' if server_version >= 100000: table_def += ( ', a int generated always as identity' ', d int generated by default as identity primary key') else: table_def += ', a int not null default 1, d int primary key' if server_version >= 120000: table_def += ', j int generated always as (i + 7) stored' else: table_def += ', j int not null default 42' self.create_table(table, table_def) i, d = 35, 1001 j = i + 7 r: Any = upsert(table, {'i': i, 'd': d, 'a': 1, 'j': j}) self.assertIsInstance(r, dict) self.assertEqual(r, {'a': 1, 'd': d, 'i': i, 'j': j}) r['i'] += 1 r = upsert(table, r) i += 1 if server_version >= 120000: j += 1 self.assertEqual(r, {'a': 1, 'd': d, 'i': i, 'j': j}) r = get(table, d) self.assertEqual(r, {'a': 1, 'd': d, 'i': i, 'j': j}) def test_clear(self): clear = self.db.clear f = False if pg.get_bool() else 'f' r: Any = clear('test') result = dict( i2=0, i4=0, i8=0, d=0, f4=0, f8=0, m=0, v4='', c4='', t='') self.assertEqual(r, result) table = 'clear_test_table' self.create_table( table, 'n integer, f float, b boolean, d date, t text') r = clear(table) result = dict(n=0, f=0, b=f, d='', t='') self.assertEqual(r, result) r['a'] = r['f'] = r['n'] = 1 r['d'] = r['t'] = 'x' r['b'] = 't' r['oid'] = 1 r = clear(table, r) result = dict(a=1, n=0, f=0, b=f, d='', t='', oid=1) self.assertEqual(r, result) def test_clear_with_quoted_names(self): clear = self.db.clear table = 'test table for clear()' self.create_table( table, '"Prime!" smallint primary key,' ' "much space" integer, "Questions?" 
text') r = clear(table) self.assertIsInstance(r, dict) self.assertEqual(r['Prime!'], 0) self.assertEqual(r['much space'], 0) self.assertEqual(r['Questions?'], '') def test_delete(self): delete = self.db.delete query = self.db.query self.assertRaises(pg.ProgrammingError, delete, 'test', dict(i2=2, i4=4, i8=8)) table = 'delete_test_table' self.create_table(table, 'n integer primary key, t text', oids=False, values=enumerate('xyz', start=1)) self.assertRaises(pg.DatabaseError, self.db.get, table, 4) r: Any = self.db.get(table, 1) s: Any = delete(table, r) self.assertEqual(s, 1) r = self.db.get(table, 3) s = delete(table, r) self.assertEqual(s, 1) s = delete(table, r) self.assertEqual(s, 0) r = query(f'select * from "{table}"').dictresult() self.assertEqual(len(r), 1) r = r[0] result = {'n': 2, 't': 'y'} self.assertEqual(r, result) r = self.db.get(table, 2) s = delete(table, r) self.assertEqual(s, 1) s = delete(table, r) self.assertEqual(s, 0) self.assertRaises(pg.DatabaseError, self.db.get, table, 2) # not existing columns and oid parameter should be ignored r.update(m=3, u='z', oid='invalid') s = delete(table, r) self.assertEqual(s, 0) def test_delete_with_oids(self): if not self.supports_oids: self.skipTest("database does not support tables with oids") delete = self.db.delete get = self.db.get query = self.db.query self.create_table('test_table', 'n int', oids=True, values=range(1, 7)) r: Any = dict(n=3) self.assertRaises(pg.ProgrammingError, delete, 'test_table', r) s: Any = get('test_table', 1, 'n') qoid = 'oid(test_table)' self.assertIn(qoid, s) r = delete('test_table', s) self.assertEqual(r, 1) r = delete('test_table', s) self.assertEqual(r, 0) q = "select min(n),count(n) from test_table" self.assertEqual(query(q).getresult()[0], (2, 5)) oid = get('test_table', 2, 'n')[qoid] s = dict(oid=oid, n=2) self.assertRaises(pg.ProgrammingError, delete, 'test_table', s) r = delete('test_table', None, oid=oid) self.assertEqual(r, 1) r = delete('test_table', None, oid=oid) self.assertEqual(r, 0) self.assertEqual(query(q).getresult()[0], (3, 4)) s = dict(oid=oid, n=2) oid = get('test_table', 3, 'n')[qoid] self.assertRaises(pg.ProgrammingError, delete, 'test_table', s) r = delete('test_table', s, oid=oid) self.assertEqual(r, 1) r = delete('test_table', s, oid=oid) self.assertEqual(r, 0) self.assertEqual(query(q).getresult()[0], (4, 3)) s = get('test_table', 4, 'n') r = delete('test_table *', s) self.assertEqual(r, 1) r = delete('test_table *', s) self.assertEqual(r, 0) self.assertEqual(query(q).getresult()[0], (5, 2)) oid = get('test_table', 5, 'n')[qoid] s = {qoid: oid, 'm': 4} r = delete('test_table', s, m=6) self.assertEqual(r, 1) r = delete('test_table *', s) self.assertEqual(r, 0) self.assertEqual(query(q).getresult()[0], (6, 1)) query("alter table test_table add column m int") query("alter table test_table add primary key (n)") self.assertIn('m', self.db.get_attnames('test_table', flush=True)) self.assertEqual('n', self.db.pkey('test_table', flush=True)) for i in range(5): query(f"insert into test_table values ({i + 1}, {i + 2})") s = dict(m=2) self.assertRaises(KeyError, delete, 'test_table', s) s = dict(m=2, oid=oid) self.assertRaises(KeyError, delete, 'test_table', s) r = delete('test_table', dict(m=2), oid=oid) self.assertEqual(r, 0) oid = get('test_table', 1, 'n')[qoid] s = dict(oid=oid) self.assertRaises(KeyError, delete, 'test_table', s) r = delete('test_table', s, oid=oid) self.assertEqual(r, 1) r = delete('test_table', s, oid=oid) self.assertEqual(r, 0) 
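        # delete() returns the number of rows actually deleted,
        # so repeating the call for the same row yields 0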
self.assertEqual(query(q).getresult()[0], (2, 5)) s = get('test_table', 2, 'n') del s['n'] r = delete('test_table', s) self.assertEqual(r, 1) r = delete('test_table', s) self.assertEqual(r, 0) self.assertEqual(query(q).getresult()[0], (3, 4)) r = delete('test_table', n=3) self.assertEqual(r, 1) r = delete('test_table', n=3) self.assertEqual(r, 0) self.assertEqual(query(q).getresult()[0], (4, 3)) r = delete('test_table', None, n=4) self.assertEqual(r, 1) r = delete('test_table', None, n=4) self.assertEqual(r, 0) self.assertEqual(query(q).getresult()[0], (5, 2)) s = dict(n=6) r = delete('test_table', s, n=5) self.assertEqual(r, 1) r = delete('test_table', s, n=5) self.assertEqual(r, 0) s = get('test_table', 6, 'n') self.assertEqual(s['n'], 6) s['n'] = 7 r = delete('test_table', s) self.assertEqual(r, 1) self.assertEqual(query(q).getresult()[0], (None, 0)) def test_delete_with_composite_key(self): query = self.db.query table = 'delete_test_table_1' self.create_table(table, 'n integer primary key, t text', values=enumerate('abc', start=1)) self.assertRaises(KeyError, self.db.delete, table, dict(t='b')) self.assertEqual(self.db.delete(table, dict(n=2)), 1) r: Any = query(f'select t from "{table}" where n=2').getresult() self.assertEqual(r, []) self.assertEqual(self.db.delete(table, dict(n=2)), 0) r = query(f'select t from "{table}" where n=3').getresult()[0][0] self.assertEqual(r, 'c') table = 'delete_test_table_2' self.create_table( table, 'n integer, m integer, t text, primary key (n, m)', values=[(n + 1, m + 1, chr(ord('a') + 2 * n + m)) for n in range(3) for m in range(2)]) self.assertRaises(KeyError, self.db.delete, table, dict(n=2, t='b')) self.assertEqual(self.db.delete(table, dict(n=2, m=2)), 1) r = [r[0] for r in query(f'select t from "{table}" where n=2' ' order by m').getresult()] self.assertEqual(r, ['c']) self.assertEqual(self.db.delete(table, dict(n=2, m=2)), 0) r = [r[0] for r in query(f'select t from "{table}" where n=3' ' order by m').getresult()] self.assertEqual(r, ['e', 'f']) self.assertEqual(self.db.delete(table, dict(n=3, m=1)), 1) r = [r[0] for r in query(f'select t from "{table}" where n=3' f' order by m').getresult()] self.assertEqual(r, ['f']) def test_delete_with_quoted_names(self): delete = self.db.delete query = self.db.query table = 'test table for delete()' self.create_table( table, '"Prime!" smallint primary key,' ' "much space" integer, "Questions?" 
text', values=[(19, 5005, 'Yes!')]) r: Any = {'Prime!': 17} r = delete(table, r) self.assertEqual(r, 0) r = query(f'select count(*) from "{table}"').getresult() self.assertEqual(r[0][0], 1) r = {'Prime!': 19} r = delete(table, r) self.assertEqual(r, 1) r = query(f'select count(*) from "{table}"').getresult() self.assertEqual(r[0][0], 0) def test_delete_referenced(self): delete = self.db.delete query = self.db.query self.create_table( 'test_parent', 'n smallint primary key', values=range(3)) self.create_table( 'test_child', 'n smallint primary key references test_parent', values=range(3)) q = ("select (select count(*) from test_parent)," " (select count(*) from test_child)") self.assertEqual(query(q).getresult()[0], (3, 3)) self.assertRaises(pg.IntegrityError, delete, 'test_parent', None, n=2) self.assertRaises(pg.IntegrityError, delete, 'test_parent *', None, n=2) r: Any = delete('test_child', None, n=2) self.assertEqual(r, 1) self.assertEqual(query(q).getresult()[0], (3, 2)) r = delete('test_parent', None, n=2) self.assertEqual(r, 1) self.assertEqual(query(q).getresult()[0], (2, 2)) self.assertRaises(pg.IntegrityError, delete, 'test_parent', dict(n=0)) self.assertRaises(pg.IntegrityError, delete, 'test_parent *', dict(n=0)) r = delete('test_child', dict(n=0)) self.assertEqual(r, 1) self.assertEqual(query(q).getresult()[0], (2, 1)) r = delete('test_child', dict(n=0)) self.assertEqual(r, 0) r = delete('test_parent', dict(n=0)) self.assertEqual(r, 1) self.assertEqual(query(q).getresult()[0], (1, 1)) r = delete('test_parent', None, n=0) self.assertEqual(r, 0) q = "select n from test_parent natural join test_child limit 2" self.assertEqual(query(q).getresult(), [(1,)]) def test_temp_crud(self): table = 'test_temp_table' self.create_table(table, "n int primary key, t varchar", temporary=True) self.db.insert(table, dict(n=1, t='one')) self.db.insert(table, dict(n=2, t='too')) self.db.insert(table, dict(n=3, t='three')) r: Any = self.db.get(table, 2) self.assertEqual(r['t'], 'too') self.db.update(table, dict(n=2, t='two')) r = self.db.get(table, 2) self.assertEqual(r['t'], 'two') self.db.delete(table, r) r = self.db.query(f'select n, t from {table} order by 1').getresult() self.assertEqual(r, [(1, 'one'), (3, 'three')]) def test_truncate(self): truncate = self.db.truncate self.assertRaises(TypeError, truncate, None) self.assertRaises(TypeError, truncate, 42) self.assertRaises(TypeError, truncate, dict(test_table=None)) query = self.db.query self.create_table('test_table', 'n smallint', temporary=False, values=[1] * 3) q = "select count(*) from test_table" r: Any = query(q).getresult()[0][0] self.assertEqual(r, 3) truncate('test_table') r = query(q).getresult()[0][0] self.assertEqual(r, 0) for _i in range(3): query("insert into test_table values (1)") r = query(q).getresult()[0][0] self.assertEqual(r, 3) truncate('public.test_table') r = query(q).getresult()[0][0] self.assertEqual(r, 0) self.create_table('test_table_2', 'n smallint', temporary=True) for t in (list, tuple, set): for _i in range(3): query("insert into test_table values (1)") query("insert into test_table_2 values (2)") q = ("select (select count(*) from test_table)," " (select count(*) from test_table_2)") r = query(q).getresult()[0] self.assertEqual(r, (3, 3)) truncate(t(['test_table', 'test_table_2'])) r = query(q).getresult()[0] self.assertEqual(r, (0, 0)) def test_truncate_restart(self): truncate = self.db.truncate self.assertRaises(TypeError, truncate, 'test_table', restart='invalid') query = self.db.query 
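        # restart=True resets the sequences owned by the table as well;
        # the SQL behind it is roughly (a sketch, not executed here):
        #   truncate table test_table restart identity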
self.create_table('test_table', 'n serial, t text') for _n in range(3): query("insert into test_table (t) values ('test')") q = "select count(n), min(n), max(n) from test_table" r: Any = query(q).getresult()[0] self.assertEqual(r, (3, 1, 3)) truncate('test_table') r = query(q).getresult()[0] self.assertEqual(r, (0, None, None)) for _n in range(3): query("insert into test_table (t) values ('test')") r = query(q).getresult()[0] self.assertEqual(r, (3, 4, 6)) truncate('test_table', restart=True) r = query(q).getresult()[0] self.assertEqual(r, (0, None, None)) for _n in range(3): query("insert into test_table (t) values ('test')") r = query(q).getresult()[0] self.assertEqual(r, (3, 1, 3)) def test_truncate_cascade(self): truncate = self.db.truncate self.assertRaises(TypeError, truncate, 'test_table', cascade='invalid') query = self.db.query self.create_table('test_parent', 'n smallint primary key', values=range(3)) self.create_table('test_child', 'n smallint primary key references test_parent (n)', values=range(3)) q = ("select (select count(*) from test_parent)," " (select count(*) from test_child)") r: Any = query(q).getresult()[0] self.assertEqual(r, (3, 3)) self.assertRaises(pg.NotSupportedError, truncate, 'test_parent') truncate(['test_parent', 'test_child']) r = query(q).getresult()[0] self.assertEqual(r, (0, 0)) for n in range(3): query(f"insert into test_parent (n) values ({n})") query(f"insert into test_child (n) values ({n})") r = query(q).getresult()[0] self.assertEqual(r, (3, 3)) truncate('test_parent', cascade=True) r = query(q).getresult()[0] self.assertEqual(r, (0, 0)) for n in range(3): query(f"insert into test_parent (n) values ({n})") query(f"insert into test_child (n) values ({n})") r = query(q).getresult()[0] self.assertEqual(r, (3, 3)) truncate('test_child') r = query(q).getresult()[0] self.assertEqual(r, (3, 0)) self.assertRaises(pg.NotSupportedError, truncate, 'test_parent') truncate('test_parent', cascade=True) r = query(q).getresult()[0] self.assertEqual(r, (0, 0)) def test_truncate_only(self): truncate = self.db.truncate self.assertRaises(TypeError, truncate, 'test_table', only='invalid') query = self.db.query self.create_table('test_parent', 'n smallint') self.create_table('test_child', 'm smallint) inherits (test_parent') for _n in range(3): query("insert into test_parent (n) values (1)") query("insert into test_child (n, m) values (2, 3)") q = ("select (select count(*) from test_parent)," " (select count(*) from test_child)") r = query(q).getresult()[0] self.assertEqual(r, (6, 3)) truncate('test_parent') r = query(q).getresult()[0] self.assertEqual(r, (0, 0)) for _n in range(3): query("insert into test_parent (n) values (1)") query("insert into test_child (n, m) values (2, 3)") r = query(q).getresult()[0] self.assertEqual(r, (6, 3)) truncate('test_parent*') r = query(q).getresult()[0] self.assertEqual(r, (0, 0)) for _n in range(3): query("insert into test_parent (n) values (1)") query("insert into test_child (n, m) values (2, 3)") r = query(q).getresult()[0] self.assertEqual(r, (6, 3)) truncate('test_parent', only=True) r = query(q).getresult()[0] self.assertEqual(r, (3, 3)) truncate('test_parent', only=False) r = query(q).getresult()[0] self.assertEqual(r, (0, 0)) self.assertRaises(ValueError, truncate, 'test_parent*', only=True) truncate('test_parent*', only=False) self.create_table('test_parent_2', 'n smallint') self.create_table('test_child_2', 'm smallint) inherits (test_parent_2') for t in '', '_2': for _n in range(3): query(f"insert into test_parent{t} (n) 
values (1)") query(f"insert into test_child{t} (n, m) values (2, 3)") q = ("select (select count(*) from test_parent)," " (select count(*) from test_child)," " (select count(*) from test_parent_2)," " (select count(*) from test_child_2)") r = query(q).getresult()[0] self.assertEqual(r, (6, 3, 6, 3)) truncate(['test_parent', 'test_parent_2'], only=[False, True]) r = query(q).getresult()[0] self.assertEqual(r, (0, 0, 3, 3)) truncate(['test_parent', 'test_parent_2'], only=False) r = query(q).getresult()[0] self.assertEqual(r, (0, 0, 0, 0)) self.assertRaises( ValueError, truncate, ['test_parent*', 'test_child'], only=[True, False]) truncate(['test_parent*', 'test_child'], only=[False, True]) def test_truncate_quoted(self): truncate = self.db.truncate query = self.db.query table = "test table for truncate()" self.create_table(table, 'n smallint', temporary=False, values=[1] * 3) q = f'select count(*) from "{table}"' r = query(q).getresult()[0][0] self.assertEqual(r, 3) truncate(table) r = query(q).getresult()[0][0] self.assertEqual(r, 0) for _i in range(3): query(f'insert into "{table}" values (1)') r = query(q).getresult()[0][0] self.assertEqual(r, 3) truncate(f'public."{table}"') r = query(q).getresult()[0][0] self.assertEqual(r, 0) # noinspection PyUnresolvedReferences def test_get_as_list(self): get_as_list = self.db.get_as_list self.assertRaises(TypeError, get_as_list) self.assertRaises(TypeError, get_as_list, None) query = self.db.query table = 'test_aslist' r: Any = query('select 1 as colname').namedresult()[0] self.assertIsInstance(r, tuple) named = hasattr(r, 'colname') names = [(1, 'Homer'), (2, 'Marge'), (3, 'Bart'), (4, 'Lisa'), (5, 'Maggie')] self.create_table( table, 'id smallint primary key, name varchar', values=names) r = get_as_list(table) self.assertIsInstance(r, list) self.assertEqual(r, names) for t, n in zip(r, names): self.assertIsInstance(t, tuple) self.assertEqual(t, n) if named: self.assertEqual(t.id, n[0]) self.assertEqual(t.name, n[1]) self.assertEqual(t._asdict(), dict(id=n[0], name=n[1])) r = get_as_list(table, what='name') self.assertIsInstance(r, list) expected: Any = sorted((row[1],) for row in names) self.assertEqual(r, expected) r = get_as_list(table, what='name, id') self.assertIsInstance(r, list) expected = sorted(tuple(reversed(row)) for row in names) self.assertEqual(r, expected) r = get_as_list(table, what=['name', 'id']) self.assertIsInstance(r, list) self.assertEqual(r, expected) r = get_as_list(table, where="name like 'Ba%'") self.assertIsInstance(r, list) self.assertEqual(r, names[2:3]) r = get_as_list(table, what='name', where="name like 'Ma%'") self.assertIsInstance(r, list) self.assertEqual(r, [('Maggie',), ('Marge',)]) r = get_as_list( table, what='name', where=["name like 'Ma%'", "name like '%r%'"]) self.assertIsInstance(r, list) self.assertEqual(r, [('Marge',)]) r = get_as_list(table, what='name', order='id') self.assertIsInstance(r, list) expected = [(row[1],) for row in names] self.assertEqual(r, expected) r = get_as_list(table, what=['name'], order=['id']) self.assertIsInstance(r, list) self.assertEqual(r, expected) r = get_as_list(table, what=['id', 'name'], order=['id', 'name']) self.assertIsInstance(r, list) self.assertEqual(r, names) r = get_as_list(table, what='id * 2 as num', order='id desc') self.assertIsInstance(r, list) expected = [(n,) for n in range(10, 0, -2)] self.assertEqual(r, expected) r = get_as_list(table, limit=2) self.assertIsInstance(r, list) self.assertEqual(r, names[:2]) r = get_as_list(table, offset=3) 
self.assertIsInstance(r, list) self.assertEqual(r, names[3:]) r = get_as_list(table, limit=1, offset=2) self.assertIsInstance(r, list) self.assertEqual(r, names[2:3]) r = get_as_list(table, scalar=True) self.assertIsInstance(r, list) self.assertEqual(r, list(range(1, 6))) r = get_as_list(table, what='name', scalar=True) self.assertIsInstance(r, list) expected = sorted(row[1] for row in names) self.assertEqual(r, expected) r = get_as_list(table, what='name', limit=1, scalar=True) self.assertIsInstance(r, list) self.assertEqual(r, expected[:1]) query(f'alter table "{table}" drop constraint "{table}_pkey"') self.assertRaises(KeyError, self.db.pkey, table, flush=True) names.insert(1, (1, 'Snowball')) query(f'insert into "{table}" values ($1, $2)', (1, 'Snowball')) r = get_as_list(table) self.assertIsInstance(r, list) self.assertEqual(r, names) r = get_as_list(table, what='name', where='id=1', scalar=True) self.assertIsInstance(r, list) self.assertEqual(r, ['Homer', 'Snowball']) # test with unordered query r = get_as_list(table, order=False) self.assertIsInstance(r, list) self.assertEqual(set(r), set(names)) # test with arbitrary from clause from_table = f'(select lower(name) as n2 from "{table}") as t2' r = get_as_list(from_table) self.assertIsInstance(r, list) r = {row[0] for row in r} expected = {row[1].lower() for row in names} self.assertEqual(r, expected) r = get_as_list(from_table, order='n2', scalar=True) self.assertIsInstance(r, list) self.assertEqual(r, sorted(expected)) r = get_as_list(from_table, order='n2', limit=1) self.assertIsInstance(r, list) self.assertEqual(len(r), 1) t = r[0] self.assertIsInstance(t, tuple) if named: self.assertEqual(t.n2, 'bart') self.assertEqual(t._asdict(), dict(n2='bart')) else: self.assertEqual(t, ('bart',)) # noinspection PyUnresolvedReferences def test_get_as_dict(self): get_as_dict = self.db.get_as_dict self.assertRaises(TypeError, get_as_dict) self.assertRaises(TypeError, get_as_dict, None) # the test table has no primary key self.assertRaises(pg.ProgrammingError, get_as_dict, 'test') query = self.db.query table = 'test_asdict' r = query('select 1 as colname').namedresult()[0] self.assertIsInstance(r, tuple) named = hasattr(r, 'colname') colors = [(1, '#7cb9e8', 'Aero'), (2, '#b5a642', 'Brass'), (3, '#b2ffff', 'Celeste'), (4, '#c19a6b', 'Desert')] self.create_table( table, 'id smallint primary key, rgb char(7), name varchar', values=colors) # keyname must be string, list or tuple self.assertRaises(KeyError, get_as_dict, table, 3) self.assertRaises(KeyError, get_as_dict, table, dict(id=None)) # missing keyname in row self.assertRaises(KeyError, get_as_dict, table, keyname='rgb', what='name') r = get_as_dict(table) self.assertIsInstance(r, dict) expected: Any = {row[0]: row[1:] for row in colors} self.assertEqual(r, expected) for key in r: self.assertIsInstance(key, int) self.assertIn(key, expected) row = r[key] self.assertIsInstance(row, tuple) t = expected[key] self.assertEqual(row, t) if named: self.assertEqual(row.rgb, t[0]) self.assertEqual(row.name, t[1]) self.assertEqual(row._asdict(), dict(rgb=t[0], name=t[1])) self.assertEqual(r.keys(), expected.keys()) r = get_as_dict(table, keyname='rgb') self.assertIsInstance(r, dict) expected = {row[1]: (row[0], row[2]) for row in sorted(colors, key=itemgetter(1))} self.assertEqual(r, expected) for key in r: self.assertIsInstance(key, str) self.assertIn(key, expected) row = r[key] self.assertIsInstance(row, tuple) # noinspection PyTypeChecker t = expected[key] self.assertEqual(row, t) if named: 
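            # named query results are named tuples,
            # so columns can also be accessed as attributes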
self.assertEqual(row.id, t[0]) self.assertEqual(row.name, t[1]) self.assertEqual(row._asdict(), dict(id=t[0], name=t[1])) self.assertEqual(r.keys(), expected.keys()) r = get_as_dict(table, keyname=['id', 'rgb']) self.assertIsInstance(r, dict) expected = {row[:2]: row[2:] for row in colors} self.assertEqual(r, expected) for key in r: self.assertIsInstance(key, tuple) self.assertIsInstance(key[0], int) self.assertIsInstance(key[1], str) if named: self.assertEqual(key, (key.id, key.rgb)) self.assertEqual(key._fields, ('id', 'rgb')) row = r[key] self.assertIsInstance(row, tuple) self.assertIsInstance(row[0], str) # noinspection PyTypeChecker t = expected[key] self.assertEqual(row, t) if named: self.assertEqual(row.name, t[0]) self.assertEqual(row._asdict(), dict(name=t[0])) self.assertEqual(r.keys(), expected.keys()) r = get_as_dict(table, keyname=['id', 'rgb'], scalar=True) self.assertIsInstance(r, dict) expected = {row[:2]: row[2] for row in colors} self.assertEqual(r, expected) for key in r: self.assertIsInstance(key, tuple) row = r[key] self.assertIsInstance(row, str) # noinspection PyTypeChecker t = expected[key] self.assertEqual(row, t) self.assertEqual(r.keys(), expected.keys()) r = get_as_dict(table, keyname='rgb', what=['rgb', 'name'], scalar=True) self.assertIsInstance(r, dict) expected = {row[1]: row[2] for row in sorted(colors, key=itemgetter(1))} self.assertEqual(r, expected) for key in r: self.assertIsInstance(key, str) row = r[key] self.assertIsInstance(row, str) # noinspection PyTypeChecker t = expected[key] self.assertEqual(row, t) self.assertEqual(r.keys(), expected.keys()) r = get_as_dict( table, what='id, name', where="rgb like '#b%'", scalar=True) self.assertIsInstance(r, dict) expected = {row[0]: row[2] for row in colors[1:3]} self.assertEqual(r, expected) for key in r: self.assertIsInstance(key, int) row = r[key] self.assertIsInstance(row, str) t = expected[key] self.assertEqual(row, t) self.assertEqual(r.keys(), expected.keys()) expected = r r = get_as_dict( table, what=['name', 'id'], where=['id > 1', 'id < 4', "rgb like '#b%'", "name not like 'A%'", "name not like '%t'"], scalar=True) self.assertEqual(r, expected) r = get_as_dict(table, what='name, id', limit=2, offset=1, scalar=True) self.assertEqual(r, expected) r = get_as_dict( table, keyname=('id',), what=('name', 'id'), where=('id > 1', 'id < 4'), order=('id',), scalar=True) self.assertEqual(r, expected) r = get_as_dict(table, limit=1) self.assertEqual(len(r), 1) self.assertEqual(r[1][1], 'Aero') r = get_as_dict(table, offset=3) self.assertEqual(len(r), 1) self.assertEqual(r[4][1], 'Desert') r = get_as_dict(table, order='id desc') expected = {row[0]: row[1:] for row in reversed(colors)} self.assertEqual(r, expected) r = get_as_dict(table, where='id > 5') self.assertIsInstance(r, dict) self.assertEqual(len(r), 0) # test with unordered query expected = {row[0]: row[1:] for row in colors} r = get_as_dict(table, order=False) self.assertIsInstance(r, dict) self.assertEqual(r, expected) self.assertNotIsInstance(self, dict) # test with arbitrary from clause from_table = f'(select id, lower(name) as n2 from "{table}") as t2' # primary key must be passed explicitly in this case self.assertRaises(pg.ProgrammingError, get_as_dict, from_table) r = get_as_dict(from_table, 'id') self.assertIsInstance(r, dict) expected = {row[0]: (row[2].lower(),) for row in colors} self.assertEqual(r, expected) # test without a primary key query(f'alter table "{table}" drop constraint "{table}_pkey"') self.assertRaises(KeyError, self.db.pkey, 
table, flush=True) self.assertRaises(pg.ProgrammingError, get_as_dict, table) r = get_as_dict(table, keyname='id') expected = {row[0]: row[1:] for row in colors} self.assertIsInstance(r, dict) self.assertEqual(r, expected) r = (1, '#007fff', 'Azure') query(f'insert into "{table}" values ($1, $2, $3)', r) # the last entry will win expected[1] = r[1:] r = get_as_dict(table, keyname='id') self.assertEqual(r, expected) def test_transaction(self): query = self.db.query self.create_table('test_table', 'n integer', temporary=False) self.db.begin() query("insert into test_table values (1)") query("insert into test_table values (2)") self.db.commit() self.db.begin() query("insert into test_table values (3)") query("insert into test_table values (4)") self.db.rollback() self.db.begin() query("insert into test_table values (5)") self.db.savepoint('before6') query("insert into test_table values (6)") self.db.rollback('before6') query("insert into test_table values (7)") self.db.commit() self.db.begin() self.db.savepoint('before8') query("insert into test_table values (8)") self.db.release('before8') self.assertRaises(pg.InternalError, self.db.rollback, 'before8') self.db.commit() self.db.start() query("insert into test_table values (9)") self.db.end() r = [r[0] for r in query( "select * from test_table order by 1").getresult()] self.assertEqual(r, [1, 2, 5, 7, 9]) self.db.begin(mode='read only') self.assertRaises(pg.InternalError, query, "insert into test_table values (0)") self.db.rollback() self.db.start(mode='Read Only') self.assertRaises(pg.InternalError, query, "insert into test_table values (0)") self.db.abort() def test_transaction_aliases(self): self.assertEqual(self.db.begin, self.db.start) self.assertEqual(self.db.commit, self.db.end) self.assertEqual(self.db.rollback, self.db.abort) def test_context_manager(self): query = self.db.query self.create_table('test_table', 'n integer check(n>0)') with self.db: query("insert into test_table values (1)") query("insert into test_table values (2)") try: with self.db: query("insert into test_table values (3)") query("insert into test_table values (4)") raise ValueError('test transaction should rollback') except ValueError as error: self.assertEqual(str(error), 'test transaction should rollback') with self.db: query("insert into test_table values (5)") try: with self.db: query("insert into test_table values (6)") query("insert into test_table values (-1)") except pg.IntegrityError as error: self.assertIn('check', str(error)) with self.db: query("insert into test_table values (7)") r = [r[0] for r in query( "select * from test_table order by 1").getresult()] self.assertEqual(r, [1, 2, 5, 7]) def test_bytea(self): query = self.db.query self.create_table('bytea_test', 'n smallint primary key, data bytea') s = b"It's all \\ kinds \x00 of\r nasty \xff stuff!\n" r = self.db.escape_bytea(s) query('insert into bytea_test values(3, $1)', (r,)) r = query('select * from bytea_test where n=3').getresult() self.assertEqual(len(r), 1) r = r[0] self.assertEqual(len(r), 2) self.assertEqual(r[0], 3) r = r[1] if pg.get_bytea_escaped(): self.assertNotEqual(r, s) r = pg.unescape_bytea(r) self.assertIsInstance(r, bytes) self.assertEqual(r, s) def test_insert_update_get_bytea(self): query = self.db.query unescape = pg.unescape_bytea if pg.get_bytea_escaped() else None self.create_table('bytea_test', 'n smallint primary key, data bytea') # insert null value r = self.db.insert('bytea_test', n=0, data=None) self.assertIsInstance(r, dict) self.assertIn('n', r) 
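        # a None value is passed through as SQL NULL,
        # so no bytea escaping is involved here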
self.assertEqual(r['n'], 0) self.assertIn('data', r) self.assertIsNone(r['data']) s = b'None' r = self.db.update('bytea_test', n=0, data=s) self.assertIsInstance(r, dict) self.assertIn('n', r) self.assertEqual(r['n'], 0) self.assertIn('data', r) r = r['data'] if unescape: self.assertNotEqual(r, s) r = unescape(r) self.assertIsInstance(r, bytes) self.assertEqual(r, s) r = self.db.update('bytea_test', n=0, data=None) self.assertIsNone(r['data']) # insert as bytes s = b"It's all \\ kinds \x00 of\r nasty \xff stuff!\n" r = self.db.insert('bytea_test', n=5, data=s) self.assertIsInstance(r, dict) self.assertIn('n', r) self.assertEqual(r['n'], 5) self.assertIn('data', r) r = r['data'] if unescape: self.assertNotEqual(r, s) r = unescape(r) self.assertIsInstance(r, bytes) self.assertEqual(r, s) # update as bytes s += b"and now even more \x00 nasty \t stuff!\f" r = self.db.update('bytea_test', n=5, data=s) self.assertIsInstance(r, dict) self.assertIn('n', r) self.assertEqual(r['n'], 5) self.assertIn('data', r) r = r['data'] if unescape: self.assertNotEqual(r, s) r = unescape(r) self.assertIsInstance(r, bytes) self.assertEqual(r, s) r = query('select * from bytea_test where n=5').getresult() self.assertEqual(len(r), 1) r = r[0] self.assertEqual(len(r), 2) self.assertEqual(r[0], 5) r = r[1] if unescape: self.assertNotEqual(r, s) r = unescape(r) self.assertIsInstance(r, bytes) self.assertEqual(r, s) r = self.db.get('bytea_test', dict(n=5)) self.assertIsInstance(r, dict) self.assertIn('n', r) self.assertEqual(r['n'], 5) self.assertIn('data', r) r = r['data'] if unescape: self.assertNotEqual(r, s) r = pg.unescape_bytea(r) self.assertIsInstance(r, bytes) self.assertEqual(r, s) def test_upsert_bytea(self): self.create_table('bytea_test', 'n smallint primary key, data bytea') s = b"It's all \\ kinds \x00 of\r nasty \xff stuff!\n" d = dict(n=7, data=s) d = self.db.upsert('bytea_test', d) self.assertIsInstance(d, dict) self.assertIn('n', d) self.assertEqual(d['n'], 7) self.assertIn('data', d) data = d['data'] if pg.get_bytea_escaped(): self.assertNotEqual(data, s) self.assertIsInstance(data, str) assert isinstance(data, str) # type guard data = pg.unescape_bytea(data) self.assertIsInstance(data, bytes) self.assertEqual(data, s) d['data'] = None d = self.db.upsert('bytea_test', d) self.assertIsInstance(d, dict) self.assertIn('n', d) self.assertEqual(d['n'], 7) self.assertIn('data', d) self.assertIsNone(d['data']) def test_insert_get_json(self): self.create_table('json_test', 'n smallint primary key, data json') jsondecode = pg.get_jsondecode() # insert null value r = self.db.insert('json_test', n=0, data=None) self.assertIsInstance(r, dict) self.assertIn('n', r) self.assertEqual(r['n'], 0) self.assertIn('data', r) self.assertIsNone(r['data']) r = self.db.get('json_test', 0) self.assertIsInstance(r, dict) self.assertIn('n', r) self.assertEqual(r['n'], 0) self.assertIn('data', r) self.assertIsNone(r['data']) # insert JSON object data = { "id": 1, "name": "Foo", "price": 1234.5, "new": True, "note": None, "tags": ["Bar", "Eek"], "stock": {"warehouse": 300, "retail": 20}} r = self.db.insert('json_test', n=1, data=data) self.assertIsInstance(r, dict) self.assertIn('n', r) self.assertEqual(r['n'], 1) self.assertIn('data', r) r = r['data'] if jsondecode is None: self.assertIsInstance(r, str) r = json.loads(r) self.assertIsInstance(r, dict) self.assertEqual(r, data) self.assertIsInstance(r['id'], int) self.assertIsInstance(r['name'], str) self.assertIsInstance(r['price'], float) self.assertIsInstance(r['new'], bool) 
self.assertIsInstance(r['tags'], list) self.assertIsInstance(r['stock'], dict) r = self.db.get('json_test', 1) self.assertIsInstance(r, dict) self.assertIn('n', r) self.assertEqual(r['n'], 1) self.assertIn('data', r) r = r['data'] if jsondecode is None: self.assertIsInstance(r, str) r = json.loads(r) self.assertIsInstance(r, dict) self.assertEqual(r, data) self.assertIsInstance(r['id'], int) self.assertIsInstance(r['name'], str) self.assertIsInstance(r['price'], float) self.assertIsInstance(r['new'], bool) self.assertIsInstance(r['tags'], list) self.assertIsInstance(r['stock'], dict) # insert JSON object as text self.db.insert('json_test', n=2, data=json.dumps(data)) q = "select data from json_test where n in (1, 2) order by n" r = self.db.query(q).getresult() self.assertEqual(len(r), 2) self.assertIsInstance(r[0][0], str if jsondecode is None else dict) self.assertEqual(r[0][0], r[1][0]) def test_insert_get_jsonb(self): self.create_table('jsonb_test', 'n smallint primary key, data jsonb') jsondecode = pg.get_jsondecode() # insert null value r = self.db.insert('jsonb_test', n=0, data=None) self.assertIsInstance(r, dict) self.assertIn('n', r) self.assertEqual(r['n'], 0) self.assertIn('data', r) self.assertIsNone(r['data']) r = self.db.get('jsonb_test', 0) self.assertIsInstance(r, dict) self.assertIn('n', r) self.assertEqual(r['n'], 0) self.assertIn('data', r) self.assertIsNone(r['data']) # insert JSON object data = { "id": 1, "name": "Foo", "price": 1234.5, "new": True, "note": None, "tags": ["Bar", "Eek"], "stock": {"warehouse": 300, "retail": 20}} r = self.db.insert('jsonb_test', n=1, data=data) self.assertIsInstance(r, dict) self.assertIn('n', r) self.assertEqual(r['n'], 1) self.assertIn('data', r) r = r['data'] if jsondecode is None: self.assertIsInstance(r, str) r = json.loads(r) self.assertIsInstance(r, dict) self.assertEqual(r, data) self.assertIsInstance(r['id'], int) self.assertIsInstance(r['name'], str) self.assertIsInstance(r['price'], float) self.assertIsInstance(r['new'], bool) self.assertIsInstance(r['tags'], list) self.assertIsInstance(r['stock'], dict) r = self.db.get('jsonb_test', 1) self.assertIsInstance(r, dict) self.assertIn('n', r) self.assertEqual(r['n'], 1) self.assertIn('data', r) r = r['data'] if jsondecode is None: self.assertIsInstance(r, str) r = json.loads(r) self.assertIsInstance(r, dict) self.assertEqual(r, data) self.assertIsInstance(r['id'], int) self.assertIsInstance(r['name'], str) self.assertIsInstance(r['price'], float) self.assertIsInstance(r['new'], bool) self.assertIsInstance(r['tags'], list) self.assertIsInstance(r['stock'], dict) def test_array(self): returns_arrays = pg.get_array() self.create_table( 'arraytest', 'id smallint, i2 smallint[], i4 integer[], i8 bigint[],' ' d numeric[], f4 real[], f8 double precision[], m money[],' ' b bool[], v4 varchar(4)[], c4 char(4)[], t text[]') r = self.db.get_attnames('arraytest') if self.regtypes: self.assertEqual(r, dict( id='smallint', i2='smallint[]', i4='integer[]', i8='bigint[]', d='numeric[]', f4='real[]', f8='double precision[]', m='money[]', b='boolean[]', v4='character varying[]', c4='character[]', t='text[]')) else: self.assertEqual(r, dict( id='int', i2='int[]', i4='int[]', i8='int[]', d='num[]', f4='float[]', f8='float[]', m='money[]', b='bool[]', v4='text[]', c4='text[]', t='text[]')) decimal = pg.get_decimal() if decimal is Decimal: long_decimal = decimal('123456789.123456789') odd_money = decimal('1234567891234567.89') else: long_decimal = decimal('12345671234.5') odd_money = 
decimal('1234567123.25') t, f = (True, False) if pg.get_bool() else ('t', 'f') data = dict( id=42, i2=[42, 1234, None, 0, -1], i4=[42, 123456789, None, 0, 1, -1], i8=[42, 123456789123456789, None, 0, 1, -1], d=[decimal(42), long_decimal, None, decimal(0), decimal(1), decimal(-1), -long_decimal], f4=[42.0, 1234.5, None, 0.0, 1.0, -1.0, float('inf'), float('-inf')], f8=[42.0, 12345671234.5, None, 0.0, 1.0, -1.0, float('inf'), float('-inf')], m=[decimal('42.00'), odd_money, None, decimal('0.00'), decimal('1.00'), decimal('-1.00'), -odd_money], b=[t, f, t, None, f, t, None, None, t], v4=['abc', '"Hi"', '', None], c4=['abc ', '"Hi"', ' ', None], t=['abc', 'Hello, World!', '"Hello, World!"', '', None]) r = data.copy() self.db.insert('arraytest', r) if returns_arrays: self.assertEqual(r, data) else: self.assertEqual(r['i4'], '{42,123456789,NULL,0,1,-1}') self.db.insert('arraytest', r) r = self.db.get('arraytest', 42, 'id') if returns_arrays: self.assertEqual(r, data) else: self.assertEqual(r['i4'], '{42,123456789,NULL,0,1,-1}') r = self.db.query('select * from arraytest limit 1').dictresult()[0] if returns_arrays: self.assertEqual(r, data) else: self.assertEqual(r['i4'], '{42,123456789,NULL,0,1,-1}') def test_array_literal(self): insert = self.db.insert returns_arrays = pg.get_array() self.create_table('arraytest', 'i int[], t text[]') r = dict(i=[1, 2, 3], t=['a', 'b', 'c']) insert('arraytest', r) if returns_arrays: self.assertEqual(r['i'], [1, 2, 3]) self.assertEqual(r['t'], ['a', 'b', 'c']) else: self.assertEqual(r['i'], '{1,2,3}') self.assertEqual(r['t'], '{a,b,c}') r = dict(i='{1,2,3}', t='{a,b,c}') self.db.insert('arraytest', r) if returns_arrays: self.assertEqual(r['i'], [1, 2, 3]) self.assertEqual(r['t'], ['a', 'b', 'c']) else: self.assertEqual(r['i'], '{1,2,3}') self.assertEqual(r['t'], '{a,b,c}') Lit = pg.Literal # noqa: N806 r = dict(i=Lit("ARRAY[1, 2, 3]"), t=Lit("ARRAY['a', 'b', 'c']")) self.db.insert('arraytest', r) if returns_arrays: self.assertEqual(r['i'], [1, 2, 3]) self.assertEqual(r['t'], ['a', 'b', 'c']) else: self.assertEqual(r['i'], '{1,2,3}') self.assertEqual(r['t'], '{a,b,c}') r = dict(i="1, 2, 3", t="'a', 'b', 'c'") self.assertRaises(pg.DataError, self.db.insert, 'arraytest', r) def test_array_of_ids(self): array_on = pg.get_array() self.create_table( 'arraytest', 'i serial primary key, c cid[], o oid[], x xid[]') r = self.db.get_attnames('arraytest') if self.regtypes: self.assertEqual(r, dict( i='integer', c='cid[]', o='oid[]', x='xid[]')) else: self.assertEqual(r, dict( i='int', c='int[]', o='int[]', x='int[]')) data = dict(i=1, c=[11, 12, 13], o=[21, 22, 23], x=[31, 32, 33]) r = data.copy() self.db.insert('arraytest', r) if array_on: self.assertEqual(r, data) else: self.assertEqual(r['o'], '{21,22,23}') self.db.get('arraytest', r) if array_on: self.assertEqual(r, data) else: self.assertEqual(r['o'], '{21,22,23}') def test_array_of_text(self): array_on = pg.get_array() self.create_table('arraytest', 'id serial primary key, data text[]') r = self.db.get_attnames('arraytest') self.assertEqual(r['data'], 'text[]') data = ['Hello, World!', '', None, '{a,b,c}', '"Hi!"', 'null', 'NULL', 'Null', 'nulL', "It's all \\ kinds of\r nasty stuff!\n"] r = dict(data=data) self.db.insert('arraytest', r) if not array_on: r['data'] = pg.cast_array(r['data']) self.assertEqual(r['data'], data) self.assertIsInstance(r['data'][1], str) self.assertIsNone(r['data'][2]) r['data'] = None self.db.get('arraytest', r) if not array_on: r['data'] = pg.cast_array(r['data']) self.assertEqual(r['data'], 
data) self.assertIsInstance(r['data'][1], str) self.assertIsNone(r['data'][2]) # noinspection PyUnresolvedReferences def test_array_of_bytea(self): array_on = pg.get_array() bytea_escaped = pg.get_bytea_escaped() self.create_table('arraytest', 'id serial primary key, data bytea[]') r = self.db.get_attnames('arraytest') self.assertEqual(r['data'], 'bytea[]') data = [b'Hello, World!', b'', None, b'{a,b,c}', b'"Hi!"', b"It's all \\ kinds \x00 of\r nasty \xff stuff!\n"] r = dict(data=data) self.db.insert('arraytest', r) if array_on: self.assertIsInstance(r['data'], list) if array_on and not bytea_escaped: self.assertEqual(r['data'], data) self.assertIsInstance(r['data'][1], bytes) self.assertIsNone(r['data'][2]) else: self.assertNotEqual(r['data'], data) r['data'] = None self.db.get('arraytest', r) if array_on: self.assertIsInstance(r['data'], list) if array_on and not bytea_escaped: self.assertEqual(r['data'], data) self.assertIsInstance(r['data'][1], bytes) self.assertIsNone(r['data'][2]) else: self.assertNotEqual(r['data'], data) def test_array_of_json(self): self.create_table('arraytest', 'id serial primary key, data json[]') r = self.db.get_attnames('arraytest') self.assertEqual(r['data'], 'json[]') data = [dict(id=815, name='John Doe'), dict(id=816, name='Jane Roe')] array_on = pg.get_array() jsondecode = pg.get_jsondecode() r = dict(data=data) self.db.insert('arraytest', r) if not array_on: r['data'] = pg.cast_array(r['data'], jsondecode) if jsondecode is None: r['data'] = [json.loads(d) for d in r['data']] self.assertEqual(r['data'], data) r['data'] = None self.db.get('arraytest', r) if not array_on: r['data'] = pg.cast_array(r['data'], jsondecode) if jsondecode is None: r['data'] = [json.loads(d) for d in r['data']] self.assertEqual(r['data'], data) r = dict(data=[json.dumps(d) for d in data]) self.db.insert('arraytest', r) if not array_on: r['data'] = pg.cast_array(r['data'], jsondecode) if jsondecode is None: r['data'] = [json.loads(d) for d in r['data']] self.assertEqual(r['data'], data) r['data'] = None self.db.get('arraytest', r) # insert empty json values r = dict(data=['', None]) self.db.insert('arraytest', r) r = r['data'] if array_on: self.assertIsInstance(r, list) self.assertEqual(len(r), 2) self.assertIsNone(r[0]) self.assertIsNone(r[1]) else: self.assertEqual(r, '{NULL,NULL}') def test_array_of_jsonb(self): self.create_table('arraytest', 'id serial primary key, data jsonb[]') r = self.db.get_attnames('arraytest') self.assertEqual(r['data'], 'jsonb[]' if self.regtypes else 'json[]') data = [dict(id=815, name='John Doe'), dict(id=816, name='Jane Roe')] array_on = pg.get_array() jsondecode = pg.get_jsondecode() r = dict(data=data) self.db.insert('arraytest', r) if not array_on: r['data'] = pg.cast_array(r['data'], jsondecode) if jsondecode is None: r['data'] = [json.loads(d) for d in r['data']] self.assertEqual(r['data'], data) r['data'] = None self.db.get('arraytest', r) if not array_on: r['data'] = pg.cast_array(r['data'], jsondecode) if jsondecode is None: r['data'] = [json.loads(d) for d in r['data']] self.assertEqual(r['data'], data) r = dict(data=[json.dumps(d) for d in data]) self.db.insert('arraytest', r) if not array_on: r['data'] = pg.cast_array(r['data'], jsondecode) if jsondecode is None: r['data'] = [json.loads(d) for d in r['data']] self.assertEqual(r['data'], data) r['data'] = None self.db.get('arraytest', r) # insert empty json values r = dict(data=['', None]) self.db.insert('arraytest', r) r = r['data'] if array_on: self.assertIsInstance(r, list) 
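            # an empty string is not valid JSON, so it is treated
            # as NULL here, just like None itself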
self.assertEqual(len(r), 2) self.assertIsNone(r[0]) self.assertIsNone(r[1]) else: self.assertEqual(r, '{NULL,NULL}') # noinspection PyUnresolvedReferences def test_deep_array(self): array_on = pg.get_array() self.create_table( 'arraytest', 'id serial primary key, data text[][][]') r = self.db.get_attnames('arraytest') self.assertEqual(r['data'], 'text[]') data = [[['Hello, World!', '{a,b,c}', 'back\\slash']]] r = dict(data=data) self.db.insert('arraytest', r) if array_on: self.assertEqual(r['data'], data) else: self.assertTrue(r['data'].startswith('{{{"Hello,')) r['data'] = None self.db.get('arraytest', r) if array_on: self.assertEqual(r['data'], data) else: self.assertTrue(r['data'].startswith('{{{"Hello,')) # noinspection PyUnresolvedReferences def test_insert_update_get_record(self): query = self.db.query query('create type test_person_type as' ' (name varchar, age smallint, married bool,' ' weight real, salary money)') self.addCleanup(query, 'drop type test_person_type') self.create_table('test_person', 'id serial primary key, person test_person_type', oids=False, temporary=False) attnames = self.db.get_attnames('test_person') self.assertEqual(len(attnames), 2) self.assertIn('id', attnames) self.assertIn('person', attnames) person_typ = attnames['person'] if self.regtypes: self.assertEqual(person_typ, 'test_person_type') else: self.assertEqual(person_typ, 'record') if self.regtypes: self.assertEqual(person_typ.attnames, dict( name='character varying', age='smallint', married='boolean', weight='real', salary='money')) else: self.assertEqual(person_typ.attnames, dict( name='text', age='int', married='bool', weight='float', salary='money')) decimal = pg.get_decimal() bool_class: type t: bool | str f: bool | str if pg.get_bool(): bool_class = bool t, f = True, False else: bool_class = str t, f = 't', 'f' person: tuple = ('John Doe', 61, t, 99.5, decimal('93456.75')) r: Any = self.db.insert('test_person', None, person=person) self.assertEqual(r['id'], 1) p = r['person'] self.assertIsInstance(p, tuple) self.assertEqual(p, person) self.assertEqual(p.name, 'John Doe') self.assertIsInstance(p.name, str) self.assertIsInstance(p.age, int) self.assertIsInstance(p.married, bool_class) self.assertIsInstance(p.weight, float) self.assertIsInstance(p.salary, decimal) person = ('Jane Roe', 59, f, 64.5, decimal('96543.25')) r['person'] = person self.db.update('test_person', r) self.assertEqual(r['id'], 1) p = r['person'] self.assertIsInstance(p, tuple) self.assertEqual(p, person) self.assertEqual(p.name, 'Jane Roe') self.assertIsInstance(p.name, str) self.assertIsInstance(p.age, int) self.assertIsInstance(p.married, bool_class) self.assertIsInstance(p.weight, float) self.assertIsInstance(p.salary, decimal) r['person'] = None self.db.get('test_person', r) self.assertEqual(r['id'], 1) p = r['person'] self.assertIsInstance(p, tuple) self.assertEqual(p, person) self.assertEqual(p.name, 'Jane Roe') self.assertIsInstance(p.name, str) self.assertIsInstance(p.age, int) self.assertIsInstance(p.married, bool_class) self.assertIsInstance(p.weight, float) self.assertIsInstance(p.salary, decimal) person = (None,) * 5 r = self.db.insert('test_person', None, person=person) self.assertEqual(r['id'], 2) p = r['person'] self.assertIsInstance(p, tuple) self.assertIsNone(p.name) self.assertIsNone(p.age) self.assertIsNone(p.married) self.assertIsNone(p.weight) self.assertIsNone(p.salary) r['person'] = None self.db.get('test_person', r) self.assertEqual(r['id'], 2) p = r['person'] self.assertIsInstance(p, tuple) 
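        # a composite value whose fields are all NULL still comes back
        # as a (named) tuple, with every attribute set to None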
self.assertIsNone(p.name) self.assertIsNone(p.age) self.assertIsNone(p.married) self.assertIsNone(p.weight) self.assertIsNone(p.salary) r = self.db.insert('test_person', None, person=None) self.assertEqual(r['id'], 3) self.assertIsNone(r['person']) r['person'] = None self.db.get('test_person', r) self.assertEqual(r['id'], 3) self.assertIsNone(r['person']) # noinspection PyUnresolvedReferences def test_record_insert_bytea(self): query = self.db.query query('create type test_person_type as' ' (name text, picture bytea)') self.addCleanup(query, 'drop type test_person_type') self.create_table('test_person', 'person test_person_type', temporary=False) person_typ = self.db.get_attnames('test_person')['person'] self.assertEqual(person_typ.attnames, dict(name='text', picture='bytea')) person = ('John Doe', b'O\x00ps\xff!') r = self.db.insert('test_person', None, person=person) p = r['person'] self.assertIsInstance(p, tuple) self.assertEqual(p, person) self.assertEqual(p.name, 'John Doe') self.assertIsInstance(p.name, str) self.assertEqual(p.picture, person[1]) self.assertIsInstance(p.picture, bytes) def test_record_insert_json(self): query = self.db.query query('create type test_person_type as (name text, data json)') self.addCleanup(query, 'drop type test_person_type') self.create_table('test_person', 'person test_person_type', temporary=False) person_typ = self.db.get_attnames('test_person')['person'] self.assertEqual(person_typ.attnames, dict(name='text', data='json')) person = ('John Doe', dict(age=61, married=True, weight=99.5)) r = self.db.insert('test_person', None, person=person) p = r['person'] self.assertIsInstance(p, tuple) if pg.get_jsondecode() is None: # noinspection PyUnresolvedReferences p = p._replace(data=json.loads(p.data)) self.assertEqual(p, person) self.assertEqual(p.name, 'John Doe') self.assertIsInstance(p.name, str) self.assertEqual(p.data, person[1]) self.assertIsInstance(p.data, dict) # noinspection PyUnresolvedReferences def test_record_literal(self): query = self.db.query query('create type test_person_type as' ' (name varchar, age smallint)') self.addCleanup(query, 'drop type test_person_type') self.create_table('test_person', 'person test_person_type', temporary=False) person_typ = self.db.get_attnames('test_person')['person'] if self.regtypes: self.assertEqual(person_typ, 'test_person_type') else: self.assertEqual(person_typ, 'record') if self.regtypes: self.assertEqual(person_typ.attnames, dict(name='character varying', age='smallint')) else: self.assertEqual(person_typ.attnames, dict(name='text', age='int')) person = pg.Literal("('John Doe', 61)") r = self.db.insert('test_person', None, person=person) p = r['person'] self.assertIsInstance(p, tuple) self.assertEqual(p.name, 'John Doe') self.assertIsInstance(p.name, str) self.assertEqual(p.age, 61) self.assertIsInstance(p.age, int) def test_date(self): query = self.db.query for datestyle in ( 'ISO', 'Postgres, MDY', 'Postgres, DMY', 'SQL, MDY', 'SQL, DMY', 'German'): self.db.set_parameter('datestyle', datestyle) d = date(2016, 3, 14) q = "select $1::date" r = query(q, (d,)).getresult()[0][0] self.assertIsInstance(r, date) self.assertEqual(r, d) q = "select '10000-08-01'::date, '0099-01-08 BC'::date" r = query(q).getresult()[0] self.assertIsInstance(r[0], date) self.assertIsInstance(r[1], date) self.assertEqual(r[0], date.max) self.assertEqual(r[1], date.min) q = "select 'infinity'::date, '-infinity'::date" r = query(q).getresult()[0] self.assertIsInstance(r[0], date) self.assertIsInstance(r[1], date) 
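        # 'infinity' and '-infinity' have no datetime equivalent
        # and are mapped to date.max and date.min instead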
self.assertEqual(r[0], date.max) self.assertEqual(r[1], date.min) def test_time(self): query = self.db.query d = time(15, 9, 26) q = "select $1::time" r = query(q, (d,)).getresult()[0][0] self.assertIsInstance(r, time) self.assertEqual(r, d) d = time(15, 9, 26, 535897) q = "select $1::time" r = query(q, (d,)).getresult()[0][0] self.assertIsInstance(r, time) self.assertEqual(r, d) def test_timetz(self): query = self.db.query timezones = dict(CET=1, EET=2, EST=-5, UTC=0) for timezone in sorted(timezones): tz = f'{timezones[timezone]:+03d}00' tzinfo = datetime.strptime(tz, '%z').tzinfo self.db.set_parameter('timezone', timezone) d = time(15, 9, 26, tzinfo=tzinfo) q = "select $1::timetz" r = query(q, (d,)).getresult()[0][0] self.assertIsInstance(r, time) self.assertEqual(r, d) d = time(15, 9, 26, 535897, tzinfo) q = "select $1::timetz" r = query(q, (d,)).getresult()[0][0] self.assertIsInstance(r, time) self.assertEqual(r, d) def test_timestamp(self): query = self.db.query for datestyle in ('ISO', 'Postgres, MDY', 'Postgres, DMY', 'SQL, MDY', 'SQL, DMY', 'German'): self.db.set_parameter('datestyle', datestyle) d = datetime(2016, 3, 14) q = "select $1::timestamp" r = query(q, (d,)).getresult()[0][0] self.assertIsInstance(r, datetime) self.assertEqual(r, d) d = datetime(2016, 3, 14, 15, 9, 26) q = "select $1::timestamp" r = query(q, (d,)).getresult()[0][0] self.assertIsInstance(r, datetime) self.assertEqual(r, d) d = datetime(2016, 3, 14, 15, 9, 26, 535897) q = "select $1::timestamp" r = query(q, (d,)).getresult()[0][0] self.assertIsInstance(r, datetime) self.assertEqual(r, d) q = ("select '10000-08-01 AD'::timestamp," " '0099-01-08 BC'::timestamp") r = query(q).getresult()[0] self.assertIsInstance(r[0], datetime) self.assertIsInstance(r[1], datetime) self.assertEqual(r[0], datetime.max) self.assertEqual(r[1], datetime.min) q = "select 'infinity'::timestamp, '-infinity'::timestamp" r = query(q).getresult()[0] self.assertIsInstance(r[0], datetime) self.assertIsInstance(r[1], datetime) self.assertEqual(r[0], datetime.max) self.assertEqual(r[1], datetime.min) def test_timestamptz(self): query = self.db.query timezones = dict(CET=1, EET=2, EST=-5, UTC=0) for timezone in sorted(timezones): tz = f'{timezones[timezone]:+03d}00' tzinfo = datetime.strptime(tz, '%z').tzinfo self.db.set_parameter('timezone', timezone) for datestyle in ('ISO', 'Postgres, MDY', 'Postgres, DMY', 'SQL, MDY', 'SQL, DMY', 'German'): self.db.set_parameter('datestyle', datestyle) d = datetime(2016, 3, 14, tzinfo=tzinfo) q = "select $1::timestamptz" r = query(q, (d,)).getresult()[0][0] self.assertIsInstance(r, datetime) self.assertEqual(r, d) d = datetime(2016, 3, 14, 15, 9, 26, tzinfo=tzinfo) q = "select $1::timestamptz" r = query(q, (d,)).getresult()[0][0] self.assertIsInstance(r, datetime) self.assertEqual(r, d) d = datetime(2016, 3, 14, 15, 9, 26, 535897, tzinfo) q = "select $1::timestamptz" r = query(q, (d,)).getresult()[0][0] self.assertIsInstance(r, datetime) self.assertEqual(r, d) q = ("select '10000-08-01 AD'::timestamptz," " '0099-01-08 BC'::timestamptz") r = query(q).getresult()[0] self.assertIsInstance(r[0], datetime) self.assertIsInstance(r[1], datetime) self.assertEqual(r[0], datetime.max) self.assertEqual(r[1], datetime.min) q = "select 'infinity'::timestamptz, '-infinity'::timestamptz" r = query(q).getresult()[0] self.assertIsInstance(r[0], datetime) self.assertIsInstance(r[1], datetime) self.assertEqual(r[0], datetime.max) self.assertEqual(r[1], datetime.min) def test_interval(self): query = self.db.query for 
intervalstyle in ( 'sql_standard', 'postgres', 'postgres_verbose', 'iso_8601'): self.db.set_parameter('intervalstyle', intervalstyle) d = timedelta(3) q = "select $1::interval" r = query(q, (d,)).getresult()[0][0] self.assertIsInstance(r, timedelta) self.assertEqual(r, d) d = timedelta(-30) r = query(q, (d,)).getresult()[0][0] self.assertIsInstance(r, timedelta) self.assertEqual(r, d) d = timedelta(hours=3, minutes=31, seconds=42, microseconds=5678) q = "select $1::interval" r = query(q, (d,)).getresult()[0][0] self.assertIsInstance(r, timedelta) self.assertEqual(r, d) def test_date_and_time_arrays(self): dt = (date(2016, 3, 14), time(15, 9, 26)) q = "select ARRAY[$1::date], ARRAY[$2::time]" r = self.db.query(q, dt).getresult()[0] self.assertIsInstance(r[0], list) self.assertEqual(r[0][0], dt[0]) self.assertIsInstance(r[1], list) self.assertEqual(r[1][0], dt[1]) def test_hstore(self): try: self.db.query("select 'k=>v'::hstore") except pg.DatabaseError: try: self.db.query("create extension hstore") except pg.DatabaseError: self.skipTest("hstore extension not enabled") d = {'k': 'v', 'foo': 'bar', 'baz': 'whatever', '1a': 'anything at all', '2=b': 'value = 2', '3>c': 'value > 3', '4"c': 'value " 4', "5'c": "value ' 5", 'hello, world': '"hi!"', 'None': None, 'NULL': 'NULL', 'empty': ''} q = "select $1::hstore" r = self.db.query(q, (pg.Hstore(d),)).getresult()[0][0] self.assertIsInstance(r, dict) self.assertEqual(r, d) def test_uuid(self): d = UUID('{12345678-1234-5678-1234-567812345678}') q = 'select $1::uuid' r = self.db.query(q, (d,)).getresult()[0][0] self.assertIsInstance(r, UUID) self.assertEqual(r, d) def test_db_types_info(self): dbtypes = self.db.dbtypes self.assertIsInstance(dbtypes, dict) self.assertNotIn('numeric', dbtypes) typ = dbtypes['numeric'] self.assertIn('numeric', dbtypes) self.assertEqual(typ, 'numeric' if self.regtypes else 'num') self.assertEqual(typ.oid, 1700) self.assertEqual(typ.pgtype, 'numeric') self.assertEqual(typ.regtype, 'numeric') self.assertEqual(typ.simple, 'num') self.assertEqual(typ.typlen, -1) self.assertEqual(typ.typtype, 'b') self.assertEqual(typ.category, 'N') self.assertEqual(typ.delim, ',') self.assertEqual(typ.relid, 0) self.assertIs(dbtypes[1700], typ) self.assertNotIn('pg_type', dbtypes) typ = dbtypes['pg_type'] self.assertIn('pg_type', dbtypes) self.assertEqual(typ, 'pg_type' if self.regtypes else 'record') self.assertIsInstance(typ.oid, int) self.assertEqual(typ.pgtype, 'pg_type') self.assertEqual(typ.regtype, 'pg_type') self.assertEqual(typ.simple, 'record') self.assertEqual(typ.typlen, -1) self.assertEqual(typ.typtype, 'c') self.assertEqual(typ.category, 'C') self.assertEqual(typ.delim, ',') self.assertNotEqual(typ.relid, 0) attnames = typ.attnames self.assertIsInstance(attnames, dict) # noinspection PyUnresolvedReferences self.assertIs(attnames, dbtypes.get_attnames('pg_type')) self.assertIn('typname', attnames) typname = attnames['typname'] self.assertEqual(typname, 'name' if self.regtypes else 'text') self.assertEqual(typname.typlen, 64) # base self.assertEqual(typname.typtype, 'b') # base self.assertEqual(typname.category, 'S') # string self.assertIn('typlen', attnames) typlen = attnames['typlen'] self.assertEqual(typlen, 'smallint' if self.regtypes else 'int') self.assertEqual(typlen.typlen, 2) # base self.assertEqual(typlen.typtype, 'b') # base self.assertEqual(typlen.category, 'N') # numeric # noinspection PyUnresolvedReferences def test_db_types_typecast(self): dbtypes = self.db.dbtypes self.assertIsInstance(dbtypes, dict) 
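        # db.dbtypes is a lazily filled per-connection registry of type info;
        # get_typecast returns the cast function currently in effect,
        # set_typecast overrides it, and reset_typecast reverts one type
        # (or, called without arguments, all types), e.g. as exercised below:
        #     dbtypes.set_typecast('int4', float)  # int4 now casts to float
        #     dbtypes.reset_typecast()             # back to the defaults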
self.assertNotIn('int4', dbtypes) self.assertIs(dbtypes.get_typecast('int4'), int) dbtypes.set_typecast('int4', float) self.assertIs(dbtypes.get_typecast('int4'), float) dbtypes.reset_typecast('int4') self.assertIs(dbtypes.get_typecast('int4'), int) dbtypes.set_typecast('int4', float) self.assertIs(dbtypes.get_typecast('int4'), float) dbtypes.reset_typecast() self.assertIs(dbtypes.get_typecast('int4'), int) self.assertNotIn('circle', dbtypes) self.assertIsNone(dbtypes.get_typecast('circle')) squared_circle = lambda v: f'Squared Circle: {v}' # noqa: E731 dbtypes.set_typecast('circle', squared_circle) self.assertIs(dbtypes.get_typecast('circle'), squared_circle) r = self.db.query("select '0,0,1'::circle").getresult()[0][0] self.assertIn('circle', dbtypes) self.assertEqual(r, 'Squared Circle: <(0,0),1>') self.assertEqual( dbtypes.typecast('Impossible', 'circle'), 'Squared Circle: Impossible') dbtypes.reset_typecast('circle') self.assertIsNone(dbtypes.get_typecast('circle')) def test_get_set_type_cast(self): get_typecast = pg.get_typecast set_typecast = pg.set_typecast dbtypes = self.db.dbtypes self.assertIsInstance(dbtypes, dict) self.assertNotIn('int4', dbtypes) self.assertNotIn('real', dbtypes) self.assertNotIn('bool', dbtypes) self.assertIs(get_typecast('int4'), int) self.assertIs(get_typecast('float4'), float) from pg.cast import cast_bool self.assertIs(get_typecast('bool'), cast_bool) cast_circle = get_typecast('circle') self.addCleanup(set_typecast, 'circle', cast_circle) squared_circle = lambda v: f'Squared Circle: {v}' # noqa: E731 self.assertNotIn('circle', dbtypes) set_typecast('circle', squared_circle) self.assertNotIn('circle', dbtypes) self.assertIs(get_typecast('circle'), squared_circle) r = self.db.query("select '0,0,1'::circle").getresult()[0][0] self.assertIn('circle', dbtypes) self.assertEqual(r, 'Squared Circle: <(0,0),1>') set_typecast('circle', cast_circle) self.assertIs(get_typecast('circle'), cast_circle) def test_notification_handler(self): # the notification handler itself is tested separately f = self.db.notification_handler callback = lambda arg_dict: None # noqa: E731 handler = f('test', callback) self.assertIsInstance(handler, pg.NotificationHandler) self.assertIs(handler.db, self.db) self.assertEqual(handler.event, 'test') self.assertEqual(handler.stop_event, 'stop_test') self.assertIs(handler.callback, callback) self.assertIsInstance(handler.arg_dict, dict) self.assertEqual(handler.arg_dict, {}) self.assertIsNone(handler.timeout) self.assertFalse(handler.listening) handler.close() self.assertIsNone(handler.db) self.db.reopen() self.assertIsNone(handler.db) handler = f('test2', callback, timeout=2) self.assertIsInstance(handler, pg.NotificationHandler) self.assertIs(handler.db, self.db) self.assertEqual(handler.event, 'test2') self.assertEqual(handler.stop_event, 'stop_test2') self.assertIs(handler.callback, callback) self.assertIsInstance(handler.arg_dict, dict) self.assertEqual(handler.arg_dict, {}) self.assertEqual(handler.timeout, 2) self.assertFalse(handler.listening) handler.close() self.assertIsNone(handler.db) self.db.reopen() self.assertIsNone(handler.db) arg_dict = {'testing': 3} handler = f('test3', callback, arg_dict=arg_dict) self.assertIsInstance(handler, pg.NotificationHandler) self.assertIs(handler.db, self.db) self.assertEqual(handler.event, 'test3') self.assertEqual(handler.stop_event, 'stop_test3') self.assertIs(handler.callback, callback) self.assertIs(handler.arg_dict, arg_dict) self.assertEqual(arg_dict['testing'], 3) 
self.assertIsNone(handler.timeout) self.assertFalse(handler.listening) handler.close() self.assertIsNone(handler.db) self.db.reopen() self.assertIsNone(handler.db) handler = f('test4', callback, stop_event='stop4') self.assertIsInstance(handler, pg.NotificationHandler) self.assertIs(handler.db, self.db) self.assertEqual(handler.event, 'test4') self.assertEqual(handler.stop_event, 'stop4') self.assertIs(handler.callback, callback) self.assertIsInstance(handler.arg_dict, dict) self.assertEqual(handler.arg_dict, {}) self.assertIsNone(handler.timeout) self.assertFalse(handler.listening) handler.close() self.assertIsNone(handler.db) self.db.reopen() self.assertIsNone(handler.db) arg_dict = {'testing': 5} handler = f('test5', callback, arg_dict, 1.5, 'stop5') self.assertIsInstance(handler, pg.NotificationHandler) self.assertIs(handler.db, self.db) self.assertEqual(handler.event, 'test5') self.assertEqual(handler.stop_event, 'stop5') self.assertIs(handler.callback, callback) self.assertIs(handler.arg_dict, arg_dict) self.assertEqual(arg_dict['testing'], 5) self.assertEqual(handler.timeout, 1.5) self.assertFalse(handler.listening) handler.close() self.assertIsNone(handler.db) self.db.reopen() self.assertIsNone(handler.db) def test_inserttable_from_query(self): # use inserttable() to copy from one table to another query = self.db.query self.create_table('test_table_from', 'n integer, t timestamp') self.create_table('test_table_to', 'n integer, t timestamp') for i in range(1, 4): query("insert into test_table_from values ($1, now())", i) n = self.db.inserttable( 'test_table_to', query("select n, t::text from test_table_from")) data_from = query("select * from test_table_from").getresult() data_to = query("select * from test_table_to").getresult() self.assertEqual(n, 3) self.assertEqual([row[0] for row in data_from], [1, 2, 3]) self.assertEqual(data_from, data_to) class TestDBClassNonStdOpts(TestDBClass): """Test the methods of the DB class with non-standard global options.""" saved_options: ClassVar[dict[str, Any]] = {} @classmethod def setUpClass(cls): cls.saved_options.clear() cls.set_option('decimal', float) not_bool = not pg.get_bool() cls.set_option('bool', not_bool) not_array = not pg.get_array() cls.set_option('array', not_array) not_bytea_escaped = not pg.get_bytea_escaped() cls.set_option('bytea_escaped', not_bytea_escaped) cls.set_option('jsondecode', None) db = DB() cls.regtypes = not db.use_regtypes() db.close() super().setUpClass() @classmethod def tearDownClass(cls): super().tearDownClass() cls.reset_option('jsondecode') cls.reset_option('bool') cls.reset_option('array') cls.reset_option('bytea_escaped') cls.reset_option('decimal') @classmethod def set_option(cls, option, value): # noinspection PyUnresolvedReferences cls.saved_options[option] = getattr(pg, 'get_' + option)() return getattr(pg, 'set_' + option)(value) @classmethod def reset_option(cls, option): # noinspection PyUnresolvedReferences return getattr(pg, 'set_' + option)(cls.saved_options[option]) class TestDBClassAdapter(unittest.TestCase): """Test the adapter object associated with the DB class.""" def setUp(self): self.db = DB() self.adapter = self.db.adapter def tearDown(self): with suppress(pg.InternalError): self.db.close() def test_guess_simple_type(self): f = self.adapter.guess_simple_type self.assertEqual(f(pg.Bytea(b'test')), 'bytea') self.assertEqual(f('string'), 'text') self.assertEqual(f(b'string'), 'text') self.assertEqual(f(True), 'bool') self.assertEqual(f(3), 'int') self.assertEqual(f(2.75), 'float') 
self.assertEqual(f(Decimal('4.25')), 'num') self.assertEqual(f(date(2016, 1, 30)), 'date') self.assertEqual(f([1, 2, 3]), 'int[]') self.assertEqual(f([[[123]]]), 'int[]') self.assertEqual(f(['a', 'b', 'c']), 'text[]') self.assertEqual(f([[['abc']]]), 'text[]') self.assertEqual(f([False, True]), 'bool[]') self.assertEqual(f([[[False]]]), 'bool[]') r = f(('string', True, 3, 2.75, [1], [False])) self.assertEqual(r, 'record') self.assertEqual(list(r.attnames.values()), [ 'text', 'bool', 'int', 'float', 'int[]', 'bool[]']) def test_adapt_query_typed_list(self): format_query = self.adapter.format_query self.assertRaises(TypeError, format_query, '%s,%s', (1, 2), ('int2',)) self.assertRaises( TypeError, format_query, '%s,%s', (1,), ('int2', 'int2')) values: list | tuple = (3, 7.5, 'hello', True) types: list | tuple = ('int4', 'float4', 'text', 'bool') sql, params = format_query("select %s,%s,%s,%s", values, types) self.assertEqual(sql, 'select $1,$2,$3,$4') self.assertEqual(params, [3, 7.5, 'hello', 't']) types = ('bool', 'bool', 'bool', 'bool') sql, params = format_query("select %s,%s,%s,%s", values, types) self.assertEqual(sql, 'select $1,$2,$3,$4') self.assertEqual(params, ['t', 't', 'f', 't']) values = ('2016-01-30', 'current_date') types = ('date', 'date') sql, params = format_query("values(%s,%s)", values, types) self.assertEqual(sql, 'values($1,current_date)') self.assertEqual(params, ['2016-01-30']) values = ([1, 2, 3], ['a', 'b', 'c']) types = ('_int4', '_text') sql, params = format_query("%s::int4[],%s::text[]", values, types) self.assertEqual(sql, '$1::int4[],$2::text[]') self.assertEqual(params, ['{1,2,3}', '{a,b,c}']) types = ('_bool', '_bool') sql, params = format_query("%s::bool[],%s::bool[]", values, types) self.assertEqual(sql, '$1::bool[],$2::bool[]') self.assertEqual(params, ['{t,t,t}', '{f,f,f}']) values = [(3, 7.5, 'hello', True, [123], ['abc'])] t = self.adapter.simple_type typ = t('record') from pg.attrs import AttrDict typ._get_attnames = lambda _self: AttrDict( i=t('int'), f=t('float'), t=t('text'), b=t('bool'), i3=t('int[]'), t3=t('text[]')) types = [typ] sql, params = format_query('select %s', values, types) self.assertEqual(sql, 'select $1') self.assertEqual(params, ['(3,7.5,hello,t,{123},{abc})']) values = [(0, -3.25, '', False, [0], [''])] sql, params = format_query('select %s', values, types) self.assertEqual(sql, 'select $1') self.assertEqual(params, ['(0,-3.25,"",f,{0},"{\\"\\"}")']) def test_adapt_query_typed_list_with_types_as_string(self): format_query = self.adapter.format_query self.assertRaises(TypeError, format_query, '%s,%s', (1, 2), 'int2') self.assertRaises( TypeError, format_query, '%s,%s', (1,), 'int2 int2') values = (3, 7.5, 'hello', True) types = 'int4 float4 text bool' # pass types as string sql, params = format_query("select %s,%s,%s,%s", values, types) self.assertEqual(sql, 'select $1,$2,$3,$4') self.assertEqual(params, [3, 7.5, 'hello', 't']) def test_adapt_query_typed_list_with_types_as_classes(self): format_query = self.adapter.format_query self.assertRaises(TypeError, format_query, '%s,%s', (1, 2), (int,)) self.assertRaises( TypeError, format_query, '%s,%s', (1,), (int, int)) values = (3, 7.5, 'hello', True) types = (int, float, str, bool) # pass types as classes sql, params = format_query("select %s,%s,%s,%s", values, types) self.assertEqual(sql, 'select $1,$2,$3,$4') self.assertEqual(params, [3, 7.5, 'hello', 't']) def test_adapt_query_typed_list_with_json(self): format_query = self.adapter.format_query value: Any = {'test': [1, "it's fine", 
3]} sql, params = format_query("select %s", (value,), 'json') self.assertEqual(sql, 'select $1') self.assertEqual(params, ['{"test": [1, "it\'s fine", 3]}']) value = pg.Json({'test': [1, "it's fine", 3]}) sql, params = format_query("select %s", (value,), 'json') self.assertEqual(sql, 'select $1') self.assertEqual(params, ['{"test": [1, "it\'s fine", 3]}']) value = {'test': [1, "it's fine", 3]} sql, params = format_query("select %s", [value], [pg.Json]) self.assertEqual(sql, 'select $1') self.assertEqual(params, ['{"test": [1, "it\'s fine", 3]}']) def test_adapt_query_typed_with_hstore(self): format_query = self.adapter.format_query value: Any = {'one': "it's fine", 'two': 2} sql, params = format_query("select %s", (value,), 'hstore') self.assertEqual(sql, "select $1") self.assertEqual(params, ['one=>"it\'s fine\",two=>2']) value = pg.Hstore({'one': "it's fine", 'two': 2}) sql, params = format_query("select %s", (value,), 'hstore') self.assertEqual(sql, "select $1") self.assertEqual(params, ['one=>"it\'s fine\",two=>2']) value = pg.Hstore({'one': "it's fine", 'two': 2}) sql, params = format_query("select %s", [value], [pg.Hstore]) self.assertEqual(sql, "select $1") self.assertEqual(params, ['one=>"it\'s fine\",two=>2']) def test_adapt_query_typed_with_uuid(self): format_query = self.adapter.format_query value: Any = '12345678-1234-5678-1234-567812345678' sql, params = format_query("select %s", (value,), 'uuid') self.assertEqual(sql, "select $1") self.assertEqual(params, ['12345678-1234-5678-1234-567812345678']) value = UUID('{12345678-1234-5678-1234-567812345678}') sql, params = format_query("select %s", (value,), 'uuid') self.assertEqual(sql, "select $1") self.assertEqual(params, ['12345678-1234-5678-1234-567812345678']) value = UUID('{12345678-1234-5678-1234-567812345678}') sql, params = format_query("select %s", (value,)) self.assertEqual(sql, "select $1") self.assertEqual(params, ['12345678-1234-5678-1234-567812345678']) def test_adapt_query_typed_dict(self): format_query = self.adapter.format_query self.assertRaises( TypeError, format_query, '%s,%s', dict(i1=1, i2=2), dict(i1='int2')) values: dict = dict(i=3, f=7.5, t='hello', b=True) types: dict = dict(i='int4', f='float4', t='text', b='bool') sql, params = format_query( "select %(i)s,%(f)s,%(t)s,%(b)s", values, types) self.assertEqual(sql, 'select $3,$2,$4,$1') self.assertEqual(params, ['t', 7.5, 3, 'hello']) types = dict(i='bool', f='bool', t='bool', b='bool') sql, params = format_query( "select %(i)s,%(f)s,%(t)s,%(b)s", values, types) self.assertEqual(sql, 'select $3,$2,$4,$1') self.assertEqual(params, ['t', 't', 't', 'f']) values = dict(d1='2016-01-30', d2='current_date') types = dict(d1='date', d2='date') sql, params = format_query("values(%(d1)s,%(d2)s)", values, types) self.assertEqual(sql, 'values($1,current_date)') self.assertEqual(params, ['2016-01-30']) values = dict(i=[1, 2, 3], t=['a', 'b', 'c']) types = dict(i='_int4', t='_text') sql, params = format_query( "%(i)s::int4[],%(t)s::text[]", values, types) self.assertEqual(sql, '$1::int4[],$2::text[]') self.assertEqual(params, ['{1,2,3}', '{a,b,c}']) types = dict(i='_bool', t='_bool') sql, params = format_query( "%(i)s::bool[],%(t)s::bool[]", values, types) self.assertEqual(sql, '$1::bool[],$2::bool[]') self.assertEqual(params, ['{t,t,t}', '{f,f,f}']) values = dict(record=(3, 7.5, 'hello', True, [123], ['abc'])) t = self.adapter.simple_type typ = t('record') from pg.attrs import AttrDict typ._get_attnames = lambda _self: AttrDict( i=t('int'), f=t('float'), t=t('text'), 
b=t('bool'), i3=t('int[]'), t3=t('text[]')) types = dict(record=typ) sql, params = format_query('select %(record)s', values, types) self.assertEqual(sql, 'select $1') self.assertEqual(params, ['(3,7.5,hello,t,{123},{abc})']) values = dict(record=(0, -3.25, '', False, [0], [''])) sql, params = format_query('select %(record)s', values, types) self.assertEqual(sql, 'select $1') self.assertEqual(params, ['(0,-3.25,"",f,{0},"{\\"\\"}")']) def test_adapt_query_untyped_list(self): format_query = self.adapter.format_query values: list | tuple = (3, 7.5, 'hello', True) sql, params = format_query("select %s,%s,%s,%s", values) self.assertEqual(sql, 'select $1,$2,$3,$4') self.assertEqual(params, [3, 7.5, 'hello', 't']) values = [date(2016, 1, 30), 'current_date'] sql, params = format_query("values(%s,%s)", values) self.assertEqual(sql, 'values($1,$2)') self.assertEqual(params, values) values = ([1, 2, 3], ['a', 'b', 'c'], [True, False, True]) sql, params = format_query("%s,%s,%s", values) self.assertEqual(sql, "$1,$2,$3") self.assertEqual(params, ['{1,2,3}', '{a,b,c}', '{t,f,t}']) values = ([[1, 2], [3, 4]], [['a', 'b'], ['c', 'd']], [[True, False], [False, True]]) sql, params = format_query("%s,%s,%s", values) self.assertEqual(sql, "$1,$2,$3") self.assertEqual(params, [ '{{1,2},{3,4}}', '{{a,b},{c,d}}', '{{t,f},{f,t}}']) values = [(3, 7.5, 'hello', True, [123], ['abc'])] sql, params = format_query('select %s', values) self.assertEqual(sql, 'select $1') self.assertEqual(params, ['(3,7.5,hello,t,{123},{abc})']) values = [(0, -3.25, '', False, [0], [''])] sql, params = format_query('select %s', values) self.assertEqual(sql, 'select $1') self.assertEqual(params, ['(0,-3.25,"",f,{0},"{\\"\\"}")']) def test_adapt_query_untyped_list_with_json(self): format_query = self.adapter.format_query value = pg.Json({'test': [1, "it's fine", 3]}) sql, params = format_query("select %s", (value,)) self.assertEqual(sql, 'select $1') self.assertEqual(params, ['{"test": [1, "it\'s fine", 3]}']) def test_adapt_query_untyped_with_hstore(self): format_query = self.adapter.format_query value = pg.Hstore({'one': "it's fine", 'two': 2}) sql, params = format_query("select %s", (value,)) self.assertEqual(sql, "select $1") self.assertEqual(params, ['one=>"it\'s fine\",two=>2']) def test_adapt_query_untyped_dict(self): format_query = self.adapter.format_query values: dict = dict(i=3, f=7.5, t='hello', b=True) sql, params = format_query( "select %(i)s,%(f)s,%(t)s,%(b)s", values) self.assertEqual(sql, 'select $3,$2,$4,$1') self.assertEqual(params, ['t', 7.5, 3, 'hello']) values = dict(d1='2016-01-30', d2='current_date') sql, params = format_query("values(%(d1)s,%(d2)s)", values) self.assertEqual(sql, 'values($1,$2)') self.assertEqual(params, [values['d1'], values['d2']]) values = dict(i=[1, 2, 3], t=['a', 'b', 'c'], b=[True, False, True]) sql, params = format_query("%(i)s,%(t)s,%(b)s", values) self.assertEqual(sql, "$2,$3,$1") self.assertEqual(params, ['{t,f,t}', '{1,2,3}', '{a,b,c}']) values = dict( i=[[1, 2], [3, 4]], t=[['a', 'b'], ['c', 'd']], b=[[True, False], [False, True]]) sql, params = format_query("%(i)s,%(t)s,%(b)s", values) self.assertEqual(sql, "$2,$3,$1") self.assertEqual(params, [ '{{t,f},{f,t}}', '{{1,2},{3,4}}', '{{a,b},{c,d}}']) values = dict(record=(3, 7.5, 'hello', True, [123], ['abc'])) sql, params = format_query('select %(record)s', values) self.assertEqual(sql, 'select $1') self.assertEqual(params, ['(3,7.5,hello,t,{123},{abc})']) values = dict(record=(0, -3.25, '', False, [0], [''])) sql, params = 
format_query('select %(record)s', values) self.assertEqual(sql, 'select $1') self.assertEqual(params, ['(0,-3.25,"",f,{0},"{\\"\\"}")']) def test_adapt_query_inline_list(self): format_query = self.adapter.format_query values: list | tuple = (3, 7.5, 'hello', True) sql, params = format_query("select %s,%s,%s,%s", values, inline=True) self.assertEqual(sql, "select 3,7.5,'hello',true") self.assertEqual(params, []) values = [date(2016, 1, 30), 'current_date'] sql, params = format_query("values(%s,%s)", values, inline=True) self.assertEqual(sql, "values('2016-01-30','current_date')") self.assertEqual(params, []) values = ([1, 2, 3], ['a', 'b', 'c'], [True, False, True]) sql, params = format_query("%s,%s,%s", values, inline=True) self.assertEqual( sql, "ARRAY[1,2,3],ARRAY['a','b','c'],ARRAY[true,false,true]") self.assertEqual(params, []) values = ([[1, 2], [3, 4]], [['a', 'b'], ['c', 'd']], [[True, False], [False, True]]) sql, params = format_query("%s,%s,%s", values, inline=True) self.assertEqual( sql, "ARRAY[[1,2],[3,4]],ARRAY[['a','b'],['c','d']]," "ARRAY[[true,false],[false,true]]") self.assertEqual(params, []) values = [(3, 7.5, 'hello', True, [123], ['abc'])] sql, params = format_query('select %s', values, inline=True) self.assertEqual( sql, "select (3,7.5,'hello',true,ARRAY[123],ARRAY['abc'])") self.assertEqual(params, []) values = [(0, -3.25, '', False, [0], [''])] sql, params = format_query('select %s', values, inline=True) self.assertEqual( sql, "select (0,-3.25,'',false,ARRAY[0],ARRAY[''])") self.assertEqual(params, []) def test_adapt_query_inline_list_with_json(self): format_query = self.adapter.format_query value = pg.Json({'test': [1, "it's fine", 3]}) sql, params = format_query("select %s", (value,), inline=True) self.assertEqual( sql, "select '{\"test\": [1, \"it''s fine\", 3]}'::json") self.assertEqual(params, []) def test_adapt_query_inline_list_with_hstore(self): format_query = self.adapter.format_query value = pg.Hstore({'one': "it's fine", 'two': 2}) sql, params = format_query("select %s", (value,), inline=True) self.assertEqual( sql, "select 'one=>\"it''s fine\",two=>2'::hstore") self.assertEqual(params, []) def test_adapt_query_inline_dict(self): format_query = self.adapter.format_query values: dict = dict(i=3, f=7.5, t='hello', b=True) sql, params = format_query( "select %(i)s,%(f)s,%(t)s,%(b)s", values, inline=True) self.assertEqual(sql, "select 3,7.5,'hello',true") self.assertEqual(params, []) values = dict(d1='2016-01-30', d2='current_date') sql, params = format_query( "values(%(d1)s,%(d2)s)", values, inline=True) self.assertEqual(sql, "values('2016-01-30','current_date')") self.assertEqual(params, []) values = dict(i=[1, 2, 3], t=['a', 'b', 'c'], b=[True, False, True]) sql, params = format_query("%(i)s,%(t)s,%(b)s", values, inline=True) self.assertEqual( sql, "ARRAY[1,2,3],ARRAY['a','b','c'],ARRAY[true,false,true]") self.assertEqual(params, []) values = dict( i=[[1, 2], [3, 4]], t=[['a', 'b'], ['c', 'd']], b=[[True, False], [False, True]]) sql, params = format_query("%(i)s,%(t)s,%(b)s", values, inline=True) self.assertEqual( sql, "ARRAY[[1,2],[3,4]],ARRAY[['a','b'],['c','d']]," "ARRAY[[true,false],[false,true]]") self.assertEqual(params, []) values = dict(record=(3, 7.5, 'hello', True, [123], ['abc'])) sql, params = format_query('select %(record)s', values, inline=True) self.assertEqual( sql, "select (3,7.5,'hello',true,ARRAY[123],ARRAY['abc'])") self.assertEqual(params, []) values = dict(record=(0, -3.25, '', False, [0], [''])) sql, params = format_query('select 
%(record)s', values, inline=True) self.assertEqual( sql, "select (0,-3.25,'',false,ARRAY[0],ARRAY[''])") self.assertEqual(params, []) def test_adapt_query_with_pg_repr(self): format_query = self.adapter.format_query self.assertRaises(TypeError, format_query, '%s', object(), inline=True) class TestObject: # noinspection PyMethodMayBeStatic def __pg_repr__(self): return "'adapted'" sql, params = format_query('select %s', [TestObject()], inline=True) self.assertEqual(sql, "select 'adapted'") self.assertEqual(params, []) sql, params = format_query('select %s', [[TestObject()]], inline=True) self.assertEqual(sql, "select ARRAY['adapted']") self.assertEqual(params, []) class TestSchemas(unittest.TestCase): """Test correct handling of schemas (namespaces).""" cls_set_up = False with_oids = "" @classmethod def setUpClass(cls): db = DB() cls.with_oids = "with oids" if db.server_version < 120000 else "" query = db.query for num_schema in range(5): if num_schema: schema = f"s{num_schema}" query(f"drop schema if exists {schema} cascade") try: query(f"create schema {schema}") except pg.ProgrammingError as e: raise RuntimeError( "The test user cannot create schemas.\n" f"Grant create on database {dbname} to the user" " for running these tests.") from e else: schema = "public" query(f"drop table if exists {schema}.t") query(f"drop table if exists {schema}.t{num_schema}") query(f"create table {schema}.t {cls.with_oids}" f" as select 1 as n, {num_schema} as d") query(f"create table {schema}.t{num_schema} {cls.with_oids}" f" as select 1 as n, {num_schema} as d") db.close() cls.cls_set_up = True @classmethod def tearDownClass(cls): db = DB() query = db.query for num_schema in range(5): if num_schema: schema = f"s{num_schema}" query(f"drop schema {schema} cascade") else: schema = "public" query(f"drop table {schema}.t") query(f"drop table {schema}.t{num_schema}") db.close() def setUp(self): self.assertTrue(self.cls_set_up) self.db = DB() def tearDown(self): self.doCleanups() self.db.close() def test_get_tables(self): tables = self.db.get_tables() for num_schema in range(5): schema = 's' + str(num_schema) if num_schema else 'public' for t in (schema + '.t', schema + '.t' + str(num_schema)): self.assertIn(t, tables) def test_get_attnames(self): get_attnames = self.db.get_attnames query = self.db.query result = {'d': 'int', 'n': 'int'} if self.with_oids: result['oid'] = 'int' r = get_attnames("t") self.assertEqual(r, result) r = get_attnames("s4.t4") self.assertEqual(r, result) query("drop table if exists s3.t3m") self.addCleanup(query, "drop table s3.t3m") query(f"create table s3.t3m {self.with_oids} as select 1 as m") result_m = {'m': 'int'} if self.with_oids: result_m['oid'] = 'int' r = get_attnames("s3.t3m") self.assertEqual(r, result_m) query("set search_path to s1,s3") r = get_attnames("t3") self.assertEqual(r, result) r = get_attnames("t3m") self.assertEqual(r, result_m) def test_get(self): get = self.db.get query = self.db.query PrgError = pg.ProgrammingError # noqa: N806 self.assertEqual(get("t", 1, 'n')['d'], 0) self.assertEqual(get("t0", 1, 'n')['d'], 0) self.assertEqual(get("public.t", 1, 'n')['d'], 0) self.assertEqual(get("public.t0", 1, 'n')['d'], 0) self.assertRaises(PrgError, get, "public.t1", 1, 'n') self.assertEqual(get("s1.t1", 1, 'n')['d'], 1) self.assertEqual(get("s3.t", 1, 'n')['d'], 3) query("set search_path to s2,s4") self.assertRaises(PrgError, get, "t1", 1, 'n') self.assertEqual(get("t4", 1, 'n')['d'], 4) self.assertRaises(PrgError, get, "t3", 1, 'n') self.assertEqual(get("t", 1, 
'n')['d'], 2)
        self.assertEqual(get("s3.t3", 1, 'n')['d'], 3)
        query("set search_path to s1,s3")
        self.assertRaises(PrgError, get, "t2", 1, 'n')
        self.assertEqual(get("t3", 1, 'n')['d'], 3)
        self.assertRaises(PrgError, get, "t4", 1, 'n')
        self.assertEqual(get("t", 1, 'n')['d'], 1)
        self.assertEqual(get("s4.t4", 1, 'n')['d'], 4)

    def test_munging(self):
        get = self.db.get
        query = self.db.query
        r = get("t", 1, 'n')
        if self.with_oids:
            self.assertIn('oid(t)', r)
        else:
            self.assertNotIn('oid(t)', r)
        query("set search_path to s2")
        r = get("t2", 1, 'n')
        if self.with_oids:
            self.assertIn('oid(t2)', r)
        else:
            self.assertNotIn('oid(t2)', r)
        query("set search_path to s3")
        r = get("t", 1, 'n')
        if self.with_oids:
            self.assertIn('oid(t)', r)
        else:
            self.assertNotIn('oid(t)', r)

    def test_query_information_schema(self):
        q = "column_name"
        if self.db.server_version < 110000:
            q += "::text"  # old version does not have sql_identifier array
        q = f"select array_agg({q}) from information_schema.columns"
        q += " where table_schema in ('s1', 's2', 's3', 's4')"
        r = self.db.query(q).onescalar()
        self.assertIsInstance(r, list)
        self.assertEqual(set(r), set(['d', 'n'] * 8))


class TestDebug(unittest.TestCase):
    """Test the debug attribute of the DB class."""

    def setUp(self):
        self.db = DB()
        self.query = self.db.query
        self.debug = self.db.debug  # type: ignore
        self.output = StringIO()
        self.stdout, sys.stdout = sys.stdout, self.output

    def tearDown(self):
        sys.stdout = self.stdout
        self.output.close()
        self.db.debug = debug
        self.db.close()

    def get_output(self):
        return self.output.getvalue()

    def send_queries(self):
        self.db.query("select 1")
        self.db.query("select 2")

    def test_debug_default(self):
        if debug:
            self.assertEqual(self.db.debug, debug)
        else:
            self.assertIsNone(self.db.debug)

    def test_debug_is_false(self):
        self.db.debug = False
        self.send_queries()
        self.assertEqual(self.get_output(), "")

    def test_debug_is_true(self):
        self.db.debug = True
        self.send_queries()
        self.assertEqual(self.get_output(), "select 1\nselect 2\n")

    def test_debug_is_string(self):
        self.db.debug = "Test with string: %s."
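        # a string assigned to the debug attribute serves as a printf-style
        # template: each executed query is substituted for the %s placeholder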
        self.send_queries()
        self.assertEqual(
            self.get_output(),
            "Test with string: select 1.\nTest with string: select 2.\n")

    def test_debug_is_file_like(self):
        with tempfile.TemporaryFile('w+') as debug_file:
            self.db.debug = debug_file
            self.send_queries()
            debug_file.seek(0)
            output = debug_file.read()
            self.assertEqual(output, "select 1\nselect 2\n")
            self.assertEqual(self.get_output(), "")

    def test_debug_is_callable(self):
        output: list[str] = []
        self.db.debug = output.append
        self.db.query("select 1")
        self.db.query("select 2")
        self.assertEqual(output, ["select 1", "select 2"])
        self.assertEqual(self.get_output(), "")

    def test_debug_multiple_args(self):
        output: list[str] = []
        self.db.debug = output.append
        args = ['Error', 42, {1: 'a', 2: 'b'}, [3, 5, 7]]
        self.db._do_debug(*args)
        self.assertEqual(output, ['\n'.join(str(arg) for arg in args)])
        self.assertEqual(self.get_output(), "")


class TestMemoryLeaks(unittest.TestCase):
    """Test that the DB class does not leak memory."""

    def get_leaks(self, fut: Callable):
        ids: set = set()
        objs: list = []
        add_ids = ids.update
        gc.collect()
        objs[:] = gc.get_objects()
        add_ids(id(obj) for obj in objs)
        fut()
        gc.collect()
        objs[:] = gc.get_objects()
        objs[:] = [obj for obj in objs if id(obj) not in ids]
        self.assertEqual(len(objs), 0)

    def test_leaks_with_close(self):
        def fut():
            db = DB()
            db.query("select $1::int as r", 42).dictresult()
            db.close()

        self.get_leaks(fut)

    def test_leaks_without_close(self):
        def fut():
            db = DB()
            db.query("select $1::int as r", 42).dictresult()

        self.get_leaks(fut)


if __name__ == '__main__':
    unittest.main()
PyGreSQL-PyGreSQL-166b135/tests/test_classic_functions.py000077500000000000000000001215331450706350600232770ustar00rootroot00000000000000
#!/usr/bin/python
"""Test the classic PyGreSQL interface.

Sub-tests for the module functions and constants.

Contributed by Christoph Zwerschke.

These tests do not need a database to test against.
""" from __future__ import annotations import json import re import unittest from datetime import timedelta from decimal import Decimal from typing import Any, Sequence import pg # the module under test class TestHasConnect(unittest.TestCase): """Test existence of basic pg module functions.""" def test_has_pg_error(self): self.assertTrue(issubclass(pg.Error, Exception)) def test_has_pg_warning(self): self.assertTrue(issubclass(pg.Warning, Exception)) def test_has_pg_interface_error(self): self.assertTrue(issubclass(pg.InterfaceError, pg.Error)) def test_has_pg_database_error(self): self.assertTrue(issubclass(pg.DatabaseError, pg.Error)) def test_has_pg_internal_error(self): self.assertTrue(issubclass(pg.InternalError, pg.DatabaseError)) def test_has_pg_operational_error(self): self.assertTrue(issubclass(pg.OperationalError, pg.DatabaseError)) def test_has_pg_programming_error(self): self.assertTrue(issubclass(pg.ProgrammingError, pg.DatabaseError)) def test_has_pg_integrity_error(self): self.assertTrue(issubclass(pg.IntegrityError, pg.DatabaseError)) def test_has_pg_data_error(self): self.assertTrue(issubclass(pg.DataError, pg.DatabaseError)) def test_has_pg_not_supported_error(self): self.assertTrue(issubclass(pg.NotSupportedError, pg.DatabaseError)) def test_has_pg_invalid_result_error(self): self.assertTrue(issubclass(pg.InvalidResultError, pg.DataError)) def test_has_pg_no_result_error(self): self.assertTrue(issubclass(pg.NoResultError, pg.InvalidResultError)) def test_has_pg_multiple_results_error(self): self.assertTrue( issubclass(pg.MultipleResultsError, pg.InvalidResultError)) def test_has_connection_type(self): self.assertIsInstance(pg.Connection, type) self.assertEqual(pg.Connection.__name__, 'Connection') def test_has_query_type(self): self.assertIsInstance(pg.Query, type) self.assertEqual(pg.Query.__name__, 'Query') def test_has_connect(self): self.assertTrue(callable(pg.connect)) def test_has_escape_string(self): self.assertTrue(callable(pg.escape_string)) def test_has_escape_bytea(self): self.assertTrue(callable(pg.escape_bytea)) def test_has_unescape_bytea(self): self.assertTrue(callable(pg.unescape_bytea)) def test_def_host(self): d0 = pg.get_defhost() d1 = 'pgtesthost' pg.set_defhost(d1) self.assertEqual(pg.get_defhost(), d1) pg.set_defhost(d0) self.assertEqual(pg.get_defhost(), d0) def test_def_port(self): d0 = pg.get_defport() d1 = 1234 pg.set_defport(d1) self.assertEqual(pg.get_defport(), d1) if d0 is None: d0 = -1 pg.set_defport(d0) if d0 == -1: d0 = None self.assertEqual(pg.get_defport(), d0) def test_def_opt(self): d0 = pg.get_defopt() d1 = '-h pgtesthost -p 1234' pg.set_defopt(d1) self.assertEqual(pg.get_defopt(), d1) pg.set_defopt(d0) self.assertEqual(pg.get_defopt(), d0) def test_def_base(self): d0 = pg.get_defbase() d1 = 'pgtestdb' pg.set_defbase(d1) self.assertEqual(pg.get_defbase(), d1) pg.set_defbase(d0) self.assertEqual(pg.get_defbase(), d0) def test_pqlib_version(self): # noinspection PyUnresolvedReferences v = pg.get_pqlib_version() self.assertIsInstance(v, int) self.assertGreater(v, 100000) self.assertLess(v, 170000) class TestParseArray(unittest.TestCase): """Test the array parser.""" test_strings: Sequence[tuple[str, type | None, Any]] = [ ('', str, ValueError), ('{}', None, []), ('{}', str, []), (' { } ', None, []), ('{', str, ValueError), ('{{}', str, ValueError), ('{}{', str, ValueError), ('[]', str, ValueError), ('()', str, ValueError), ('{[]}', str, ['[]']), ('{hello}', int, ValueError), ('{42}', int, [42]), ('{ 42 }', int, [42]), ('{42', int, 
ValueError), ('{ 42 ', int, ValueError), ('{hello}', str, ['hello']), ('{ hello }', str, ['hello']), ('{hi} ', str, ['hi']), ('{hi} ?', str, ValueError), ('{null}', str, [None]), (' { NULL } ', str, [None]), (' { NULL } ', str, [None]), (' { not null } ', str, ['not null']), (' { not NULL } ', str, ['not NULL']), (' {"null"} ', str, ['null']), (' {"NULL"} ', str, ['NULL']), ('{Hi!}', str, ['Hi!']), ('{"Hi!"}', str, ['Hi!']), ('{" Hi! "}', str, [' Hi! ']), ('{a"}', str, ValueError), ('{"b}', str, ValueError), ('{a"b}', str, ValueError), (r'{a\"b}', str, ['a"b']), (r'{a\,b}', str, ['a,b']), (r'{a\bc}', str, ['abc']), (r'{"a\bc"}', str, ['abc']), (r'{\a\b\c}', str, ['abc']), (r'{"\a\b\c"}', str, ['abc']), (r'{"a"b"}', str, ValueError), (r'{"a""b"}', str, ValueError), (r'{"a\"b"}', str, ['a"b']), ('{"{}"}', str, ['{}']), (r'{\{\}}', str, ['{}']), ('{"{a,b,c}"}', str, ['{a,b,c}']), ("{'abc'}", str, ["'abc'"]), ('{"abc"}', str, ['abc']), (r'{\"abc\"}', str, ['"abc"']), (r"{\'abc\'}", str, ["'abc'"]), (r"{abc,d,efg}", str, ['abc', 'd', 'efg']), ('{Hello World!}', str, ['Hello World!']), ('{Hello, World!}', str, ['Hello', 'World!']), (r'{Hello,\ World!}', str, ['Hello', ' World!']), (r'{Hello\, World!}', str, ['Hello, World!']), ('{"Hello World!"}', str, ['Hello World!']), ('{this, should, be, null}', str, ['this', 'should', 'be', None]), ('{This, should, be, NULL}', str, ['This', 'should', 'be', None]), ('{3, 2, 1, null}', int, [3, 2, 1, None]), ('{3, 2, 1, NULL}', int, [3, 2, 1, None]), ('{3,17,51}', int, [3, 17, 51]), (' { 3 , 17 , 51 } ', int, [3, 17, 51]), ('{3,17,51}', str, ['3', '17', '51']), (' { 3 , 17 , 51 } ', str, ['3', '17', '51']), ('{1,"2",abc,"def"}', str, ['1', '2', 'abc', 'def']), ('{{}}', int, [[]]), ('{{},{}}', int, [[], []]), ('{ {} , {} , {} }', int, [[], [], []]), ('{ {} , {} , {} , }', int, ValueError), ('{{{1,2,3},{4,5,6}}}', int, [[[1, 2, 3], [4, 5, 6]]]), ('{{1,2,3},{4,5,6},{7,8,9}}', int, [[1, 2, 3], [4, 5, 6], [7, 8, 9]]), ('{20000, 25000, 25000, 25000}', int, [20000, 25000, 25000, 25000]), ('{{{17,18,19},{14,15,16},{11,12,13}},' '{{27,28,29},{24,25,26},{21,22,23}},' '{{37,38,39},{34,35,36},{31,32,33}}}', int, [[[17, 18, 19], [14, 15, 16], [11, 12, 13]], [[27, 28, 29], [24, 25, 26], [21, 22, 23]], [[37, 38, 39], [34, 35, 36], [31, 32, 33]]]), ('{{"breakfast", "consulting"}, {"meeting", "lunch"}}', str, [['breakfast', 'consulting'], ['meeting', 'lunch']]), ('[1:3]={1,2,3}', int, [1, 2, 3]), ('[-1:1]={1,2,3}', int, [1, 2, 3]), ('[-1:+1]={1,2,3}', int, [1, 2, 3]), ('[-3:-1]={1,2,3}', int, [1, 2, 3]), ('[+1:+3]={1,2,3}', int, [1, 2, 3]), ('[0:2]={1,2,3}', int, [1, 2, 3]), ('[7:9]={1,2,3}', int, [1, 2, 3]), ('[]={1,2,3}', int, ValueError), ('[1:]={1,2,3}', int, ValueError), ('[:3]={1,2,3}', int, ValueError), ('[1:1][-2:-1][3:5]={{{1,2,3},{4,5,6}}}', int, [[[1, 2, 3], [4, 5, 6]]]), (' [1:1] [-2:-1] [3:5] = { { { 1 , 2 , 3 }, {4 , 5 , 6 } } }', int, [[[1, 2, 3], [4, 5, 6]]]), ('[1:1][3:5]={{1,2,3},{4,5,6}}', int, [[1, 2, 3], [4, 5, 6]]), ('[3:5]={{1,2,3},{4,5,6}}', int, ValueError), ('[1:1][-2:-1][3:5]={{1,2,3},{4,5,6}}', int, ValueError)] def test_parser_params(self): f = pg.cast_array self.assertRaises(TypeError, f) self.assertRaises(TypeError, f, None) self.assertRaises(TypeError, f, '{}', 1) self.assertRaises(TypeError, f, '{}', b',',) self.assertRaises(TypeError, f, '{}', None, None) self.assertRaises(TypeError, f, '{}', None, 1) self.assertRaises(TypeError, f, '{}', None, b'') self.assertRaises(ValueError, f, '{}', None, b'\\') self.assertRaises(ValueError, f, '{}', 
None, b'{') self.assertRaises(ValueError, f, '{}', None, b'}') self.assertRaises(TypeError, f, '{}', None, b',;') self.assertEqual(f('{}'), []) self.assertEqual(f('{}', None), []) self.assertEqual(f('{}', None, b';'), []) self.assertEqual(f('{}', str), []) self.assertEqual(f('{}', str, b';'), []) def test_parser_simple(self): r = pg.cast_array('{a,b,c}') self.assertIsInstance(r, list) self.assertEqual(len(r), 3) self.assertEqual(r, ['a', 'b', 'c']) def test_parser_nested(self): f = pg.cast_array r = f('{{a,b,c}}') self.assertIsInstance(r, list) self.assertEqual(len(r), 1) r = r[0] self.assertIsInstance(r, list) self.assertEqual(len(r), 3) self.assertEqual(r, ['a', 'b', 'c']) self.assertRaises(ValueError, f, '{a,{b,c}}') r = f('{{a,b},{c,d}}') self.assertIsInstance(r, list) self.assertEqual(len(r), 2) r = r[1] self.assertIsInstance(r, list) self.assertEqual(len(r), 2) self.assertEqual(r, ['c', 'd']) r = f('{{a},{b},{c}}') self.assertIsInstance(r, list) self.assertEqual(len(r), 3) r = r[1] self.assertIsInstance(r, list) self.assertEqual(len(r), 1) self.assertEqual(r[0], 'b') r = f('{{{{{{{abc}}}}}}}') for _i in range(7): self.assertIsInstance(r, list) self.assertEqual(len(r), 1) # noinspection PyUnresolvedReferences r = r[0] self.assertEqual(r, 'abc') def test_parser_too_deeply_nested(self): f = pg.cast_array for n in 3, 5, 9, 12, 16, 32, 64, 256: s = '{' * n + 'a,b,c' + '}' * n if n > 16: # hard coded maximum depth self.assertRaises(ValueError, f, s) else: r = f(s) for _i in range(n - 1): self.assertIsInstance(r, list) self.assertEqual(len(r), 1) r = r[0] self.assertEqual(len(r), 3) self.assertEqual(r, ['a', 'b', 'c']) def test_parser_cast(self): f = pg.cast_array self.assertEqual(f('{1}'), ['1']) self.assertEqual(f('{1}', None), ['1']) self.assertEqual(f('{1}', int), [1]) self.assertEqual(f('{1}', str), ['1']) self.assertEqual(f('{a}'), ['a']) self.assertEqual(f('{a}', None), ['a']) self.assertRaises(ValueError, f, '{a}', int) self.assertEqual(f('{a}', str), ['a']) def cast(s): return f'{s} is ok' self.assertEqual(f('{a}', cast), ['a is ok']) def test_parser_delim(self): f = pg.cast_array self.assertEqual(f('{1,2}'), ['1', '2']) self.assertEqual(f('{1,2}', delim=b','), ['1', '2']) self.assertEqual(f('{1;2}'), ['1;2']) self.assertEqual(f('{1;2}', delim=b';'), ['1', '2']) self.assertEqual(f('{1,2}', delim=b';'), ['1,2']) def test_parser_with_data(self): f = pg.cast_array for string, cast, expected in self.test_strings: if expected is ValueError: self.assertRaises(ValueError, f, string, cast) else: self.assertEqual(f(string, cast), expected) def test_parser_without_cast(self): f = pg.cast_array for string, cast, expected in self.test_strings: if cast is not str: continue if expected is ValueError: self.assertRaises(ValueError, f, string) else: self.assertEqual(f(string), expected) def test_parser_with_different_delimiter(self): f = pg.cast_array def replace_comma(value): if isinstance(value, str): return value.replace(',', ';') elif isinstance(value, list): return [replace_comma(v) for v in value] else: return value for string, cast, expected in self.test_strings: string = replace_comma(string) if expected is ValueError: self.assertRaises(ValueError, f, string, cast) else: expected = replace_comma(expected) self.assertEqual(f(string, cast, b';'), expected) class TestParseRecord(unittest.TestCase): """Test the record parser.""" test_strings: Sequence[tuple[str, type | tuple[type, ...] 
| None, Any]] = [ ('', None, ValueError), ('', str, ValueError), ('(', None, ValueError), ('(', str, ValueError), ('()', None, (None,)), ('()', str, (None,)), ('()', int, (None,)), ('(,)', str, (None, None)), ('( , )', str, (' ', ' ')), ('(")', None, ValueError), ('("")', None, ('',)), ('("")', str, ('',)), ('("")', int, ValueError), ('("" )', None, (' ',)), ('("" )', str, (' ',)), ('("" )', int, ValueError), (' () ', None, (None,)), (' ( ) ', None, (' ',)), ('(', str, ValueError), ('(()', str, ('(',)), ('(())', str, ValueError), ('()(', str, ValueError), ('()()', str, ValueError), ('[]', str, ValueError), ('{}', str, ValueError), ('([])', str, ('[]',)), ('(hello)', int, ValueError), ('(42)', int, (42,)), ('( 42 )', int, (42,)), ('( 42)', int, (42,)), ('(42)', str, ('42',)), ('( 42 )', str, (' 42 ',)), ('( 42)', str, (' 42',)), ('(42', int, ValueError), ('( 42 ', int, ValueError), ('(hello)', str, ('hello',)), ('( hello )', str, (' hello ',)), ('(hello))', str, ValueError), (' (hello) ', str, ('hello',)), (' (hello) )', str, ValueError), ('(hello)?', str, ValueError), ('(null)', str, ('null',)), ('(null)', int, ValueError), (' ( NULL ) ', str, (' NULL ',)), (' ( NULL ) ', str, (' NULL ',)), (' ( null null ) ', str, (' null null ',)), (' ("null") ', str, ('null',)), (' ("NULL") ', str, ('NULL',)), ('(Hi!)', str, ('Hi!',)), ('("Hi!")', str, ('Hi!',)), ("('Hi!')", str, ("'Hi!'",)), ('(" Hi! ")', str, (' Hi! ',)), ('("Hi!" )', str, ('Hi! ',)), ('( "Hi!")', str, (' Hi!',)), ('( "Hi!" )', str, (' Hi! ',)), ('( ""Hi!"" )', str, (' Hi! ',)), ('( """Hi!""" )', str, (' "Hi!" ',)), ('(a")', str, ValueError), ('("b)', str, ValueError), ('("a" "b)', str, ValueError), ('("a" "b")', str, ('a b',)), ('( "a" "b" "c" )', str, (' a b c ',)), ('( "a" "b" "c" )', str, (' a b c ',)), ('( "a,b" "c,d" )', str, (' a,b c,d ',)), ('( "(a,b,c)" d, e, "f,g")', str, (' (a,b,c) d', ' e', ' f,g')), ('(a",b,c",d,"e,f")', str, ('a,b,c', 'd', 'e,f')), ('( """a,b""", ""c,d"", "e,f", "g", ""h"", """i""")', str, (' "a,b"', ' c', 'd', ' e,f', ' g', ' h', ' "i"')), ('(a",b)",c"),(d,e)",f,g)', str, ('a,b)', 'c),(d,e)', 'f', 'g')), ('(a"b)', str, ValueError), (r'(a\"b)', str, ('a"b',)), ('(a""b)', str, ('ab',)), ('("a""b")', str, ('a"b',)), (r'(a\,b)', str, ('a,b',)), (r'(a\bc)', str, ('abc',)), (r'("a\bc")', str, ('abc',)), (r'(\a\b\c)', str, ('abc',)), (r'("\a\b\c")', str, ('abc',)), ('("()")', str, ('()',)), (r'(\,)', str, (',',)), (r'(\(\))', str, ('()',)), (r'(\)\()', str, (')(',)), ('("(a,b,c)")', str, ('(a,b,c)',)), ("('abc')", str, ("'abc'",)), ('("abc")', str, ('abc',)), (r'(\"abc\")', str, ('"abc"',)), (r"(\'abc\')", str, ("'abc'",)), ('(Hello World!)', str, ('Hello World!',)), ('(Hello, World!)', str, ('Hello', ' World!',)), (r'(Hello,\ World!)', str, ('Hello', ' World!',)), (r'(Hello\, World!)', str, ('Hello, World!',)), ('("Hello World!")', str, ('Hello World!',)), ("(this,shouldn't,be,null)", str, ('this', "shouldn't", 'be', 'null')), ('(null,should,be,)', str, ('null', 'should', 'be', None)), ('(abcABC0123!?+-*/=&%$\\\\\'\\"{[]}"""":;\\,,)', str, ('abcABC0123!?+-*/=&%$\\\'"{[]}":;,', None)), ('(3, 2, 1,)', int, (3, 2, 1, None)), ('(3, 2, 1, )', int, ValueError), ('(, 1, 2, 3)', int, (None, 1, 2, 3)), ('( , 1, 2, 3)', int, ValueError), ('(,1,,2,,3,)', int, (None, 1, None, 2, None, 3, None)), ('(3,17,51)', int, (3, 17, 51)), (' ( 3 , 17 , 51 ) ', int, (3, 17, 51)), ('(3,17,51)', str, ('3', '17', '51')), (' ( 3 , 17 , 51 ) ', str, (' 3 ', ' 17 ', ' 51 ')), ('(1,"2",abc,"def")', str, ('1', '2', 'abc', 'def')), ('(())', 
str, ValueError), ('()))', str, ValueError), ('()()', str, ValueError), ('((()', str, ('((',)), ('(())', int, ValueError), ('((),())', str, ValueError), ('("()","()")', str, ('()', '()')), ('( " () , () , () " )', str, (' () , () , () ',)), ('(20000, 25000, 25000, 25000)', int, (20000, 25000, 25000, 25000)), ('("breakfast","consulting","meeting","lunch")', str, ('breakfast', 'consulting', 'meeting', 'lunch')), ('("breakfast","consulting","meeting","lunch")', (str, str, str), ValueError), ('("breakfast","consulting","meeting","lunch")', (str, str, str, str), ('breakfast', 'consulting', 'meeting', 'lunch')), ('("breakfast","consulting","meeting","lunch")', (str, str, str, str, str), ValueError), ('("fuzzy dice",42,1.9375)', None, ('fuzzy dice', '42', '1.9375')), ('("fuzzy dice",42,1.9375)', str, ('fuzzy dice', '42', '1.9375')), ('("fuzzy dice",42,1.9375)', int, ValueError), ('("fuzzy dice",42,1.9375)', (str, int, float), ('fuzzy dice', 42, 1.9375)), ('("fuzzy dice",42,1.9375)', (str, int), ValueError), ('("fuzzy dice",42,1.9375)', (str, int, float, str), ValueError), ('("fuzzy dice",42,)', (str, int, float), ('fuzzy dice', 42, None)), ('("fuzzy dice",42,)', (str, int), ValueError), ('("",42,)', (str, int, float), ('', 42, None)), ('("fuzzy dice","",1.9375)', (str, int, float), ValueError), ('(fuzzy dice,"42","1.9375")', (str, int, float), ('fuzzy dice', 42, 1.9375))] def test_parser_params(self): f = pg.cast_record self.assertRaises(TypeError, f) self.assertRaises(TypeError, f, None) self.assertRaises(TypeError, f, '()', 1) self.assertRaises(TypeError, f, '()', b',',) self.assertRaises(TypeError, f, '()', None, None) self.assertRaises(TypeError, f, '()', None, 1) self.assertRaises(TypeError, f, '()', None, b'') self.assertRaises(ValueError, f, '()', None, b'\\') self.assertRaises(ValueError, f, '()', None, b'(') self.assertRaises(ValueError, f, '()', None, b')') self.assertRaises(TypeError, f, '{}', None, b',;') self.assertEqual(f('()'), (None,)) self.assertEqual(f('()', None), (None,)) self.assertEqual(f('()', None, b';'), (None,)) self.assertEqual(f('()', str), (None,)) self.assertEqual(f('()', str, b';'), (None,)) def test_parser_simple(self): r = pg.cast_record('(a,b,c)') self.assertIsInstance(r, tuple) self.assertEqual(len(r), 3) self.assertEqual(r, ('a', 'b', 'c')) def test_parser_nested(self): f = pg.cast_record self.assertRaises(ValueError, f, '((a,b,c))') self.assertRaises(ValueError, f, '((a,b),(c,d))') self.assertRaises(ValueError, f, '((a),(b),(c))') self.assertRaises(ValueError, f, '(((((((abc)))))))') def test_parser_many_elements(self): f = pg.cast_record for n in 3, 5, 9, 12, 16, 32, 64, 256: s = ','.join(map(str, range(n))) s = f'({s})' r = f(s, int) self.assertEqual(r, tuple(range(n))) def test_parser_cast_uniform(self): f = pg.cast_record self.assertEqual(f('(1)'), ('1',)) self.assertEqual(f('(1)', None), ('1',)) self.assertEqual(f('(1)', int), (1,)) self.assertEqual(f('(1)', str), ('1',)) self.assertEqual(f('(a)'), ('a',)) self.assertEqual(f('(a)', None), ('a',)) self.assertRaises(ValueError, f, '(a)', int) self.assertEqual(f('(a)', str), ('a',)) def cast(s): return f'{s} is ok' self.assertEqual(f('(a)', cast), ('a is ok',)) def test_parser_cast_non_uniform(self): f = pg.cast_record self.assertEqual(f('(1)', []), ('1',)) self.assertEqual(f('(1)', [None]), ('1',)) self.assertEqual(f('(1)', [str]), ('1',)) self.assertEqual(f('(1)', [int]), (1,)) self.assertRaises(ValueError, f, '(1)', [None, None]) self.assertRaises(ValueError, f, '(1)', [str, str]) 
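        # with a non-uniform cast sequence, the number of casts must
        # exactly match the number of fields in the record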
self.assertRaises(ValueError, f, '(1)', [int, int]) self.assertEqual(f('(a)', [None]), ('a',)) self.assertEqual(f('(a)', [str]), ('a',)) self.assertRaises(ValueError, f, '(a)', [int]) self.assertEqual(f('(1,a)', [int, str]), (1, 'a')) self.assertRaises(ValueError, f, '(1,a)', [str, int]) self.assertEqual(f('(a,1)', [str, int]), ('a', 1)) self.assertRaises(ValueError, f, '(a,1)', [int, str]) self.assertEqual( f('(1,a,2,b,3,c)', [int, str, int, str, int, str]), (1, 'a', 2, 'b', 3, 'c')) self.assertEqual( f('(1,a,2,b,3,c)', (int, str, int, str, int, str)), (1, 'a', 2, 'b', 3, 'c')) def cast1(s): return f'{s} is ok' self.assertEqual(f('(a)', [cast1]), ('a is ok',)) def cast2(s): return f'and {s} is ok, too' self.assertEqual( f('(a,b)', [cast1, cast2]), ('a is ok', 'and b is ok, too')) self.assertRaises(ValueError, f, '(a)', [cast1, cast2]) self.assertRaises(ValueError, f, '(a,b,c)', [cast1, cast2]) self.assertEqual( f('(1,2,3,4,5,6)', [int, float, str, None, cast1, cast2]), (1, 2.0, '3', '4', '5 is ok', 'and 6 is ok, too')) def test_parser_delim(self): f = pg.cast_record self.assertEqual(f('(1,2)'), ('1', '2')) self.assertEqual(f('(1,2)', delim=b','), ('1', '2')) self.assertEqual(f('(1;2)'), ('1;2',)) self.assertEqual(f('(1;2)', delim=b';'), ('1', '2')) self.assertEqual(f('(1,2)', delim=b';'), ('1,2',)) def test_parser_with_data(self): f = pg.cast_record for string, cast, expected in self.test_strings: if expected is ValueError: self.assertRaises(ValueError, f, string, cast) else: self.assertEqual(f(string, cast), expected) def test_parser_without_cast(self): f = pg.cast_record for string, cast, expected in self.test_strings: if cast is not str: continue if expected is ValueError: self.assertRaises(ValueError, f, string) else: self.assertEqual(f(string), expected) def test_parser_with_different_delimiter(self): f = pg.cast_record def replace_comma(value): if isinstance(value, str): return value.replace(';', '@').replace( ',', ';').replace('@', ',') elif isinstance(value, tuple): return tuple(replace_comma(v) for v in value) else: return value for string, cast, expected in self.test_strings: string = replace_comma(string) if expected is ValueError: self.assertRaises(ValueError, f, string, cast) else: expected = replace_comma(expected) self.assertEqual(f(string, cast, b';'), expected) class TestParseHStore(unittest.TestCase): """Test the hstore parser.""" test_strings: Sequence[tuple[str, Any]] = [ ('', {}), ('=>', ValueError), ('""=>', ValueError), ('=>""', ValueError), ('""=>""', {'': ''}), ('NULL=>NULL', {'NULL': None}), ('null=>null', {'null': None}), ('NULL=>"NULL"', {'NULL': 'NULL'}), ('null=>"null"', {'null': 'null'}), ('k', ValueError), ('k,', ValueError), ('k=', ValueError), ('k=>', ValueError), ('k=>v', {'k': 'v'}), ('k=>v,', ValueError), (' k => v ', {'k': 'v'}), (' k => v ', {'k': 'v'}), ('" k " => " v "', {' k ': ' v '}), ('"k=>v', ValueError), ('k=>"v', ValueError), ('"1-a" => "anything at all"', {'1-a': 'anything at all'}), ('k => v, foo => bar, baz => whatever, "1-a" => "anything at all"', {'k': 'v', 'foo': 'bar', 'baz': 'whatever', '1-a': 'anything at all'}), ('"Hello, World!"=>"Hi!"', {'Hello, World!': 'Hi!'}), ('"Hi!"=>"Hello, World!"', {'Hi!': 'Hello, World!'}), (r'"k=>v"=>k\=\>v', {'k=>v': 'k=>v'}), (r'k\=\>v=>"k=>v"', {'k=>v': 'k=>v'}), ('a\\,b=>a,b=>a', {'a,b': 'a', 'b': 'a'})] def test_parser(self): f = pg.cast_hstore self.assertRaises(TypeError, f) self.assertRaises(TypeError, f, None) self.assertRaises(TypeError, f, 42) self.assertRaises(TypeError, f, '', None) for 
string, expected in self.test_strings: if expected is ValueError: self.assertRaises(ValueError, f, string) else: self.assertEqual(f(string), expected) class TestCastInterval(unittest.TestCase): """Test the interval typecast function.""" intervals: Sequence[tuple[tuple[int, ...], tuple[str, ...]]] = [ ((0, 0, 0, 1, 0, 0, 0), ('1:00:00', '01:00:00', '@ 1 hour', 'PT1H')), ((0, 0, 0, -1, 0, 0, 0), ('-1:00:00', '-01:00:00', '@ -1 hour', 'PT-1H')), ((0, 0, 0, 1, 0, 0, 0), ('0-0 0 1:00:00', '0 years 0 mons 0 days 01:00:00', '@ 0 years 0 mons 0 days 1 hour', 'P0Y0M0DT1H')), ((0, 0, 0, -1, 0, 0, 0), ('-0-0 -1:00:00', '0 years 0 mons 0 days -01:00:00', '@ 0 years 0 mons 0 days -1 hour', 'P0Y0M0DT-1H')), ((0, 0, 1, 0, 0, 0, 0), ('1 0:00:00', '1 day', '@ 1 day', 'P1D')), ((0, 0, -1, 0, 0, 0, 0), ('-1 0:00:00', '-1 day', '@ -1 day', 'P-1D')), ((0, 1, 0, 0, 0, 0, 0), ('0-1', '1 mon', '@ 1 mon', 'P1M')), ((1, 0, 0, 0, 0, 0, 0), ('1-0', '1 year', '@ 1 year', 'P1Y')), ((0, 0, 0, 2, 0, 0, 0), ('2:00:00', '02:00:00', '@ 2 hours', 'PT2H')), ((0, 0, 2, 0, 0, 0, 0), ('2 0:00:00', '2 days', '@ 2 days', 'P2D')), ((0, 2, 0, 0, 0, 0, 0), ('0-2', '2 mons', '@ 2 mons', 'P2M')), ((2, 0, 0, 0, 0, 0, 0), ('2-0', '2 years', '@ 2 years', 'P2Y')), ((0, 0, 0, -3, 0, 0, 0), ('-3:00:00', '-03:00:00', '@ 3 hours ago', 'PT-3H')), ((0, 0, -3, 0, 0, 0, 0), ('-3 0:00:00', '-3 days', '@ 3 days ago', 'P-3D')), ((0, -3, 0, 0, 0, 0, 0), ('-0-3', '-3 mons', '@ 3 mons ago', 'P-3M')), ((-3, 0, 0, 0, 0, 0, 0), ('-3-0', '-3 years', '@ 3 years ago', 'P-3Y')), ((0, 0, 0, 0, 1, 0, 0), ('0:01:00', '00:01:00', '@ 1 min', 'PT1M')), ((0, 0, 0, 0, 0, 1, 0), ('0:00:01', '00:00:01', '@ 1 sec', 'PT1S')), ((0, 0, 0, 0, 0, 0, 1), ('0:00:00.000001', '00:00:00.000001', '@ 0.000001 secs', 'PT0.000001S')), ((0, 0, 0, 0, 2, 0, 0), ('0:02:00', '00:02:00', '@ 2 mins', 'PT2M')), ((0, 0, 0, 0, 0, 2, 0), ('0:00:02', '00:00:02', '@ 2 secs', 'PT2S')), ((0, 0, 0, 0, 0, 0, 2), ('0:00:00.000002', '00:00:00.000002', '@ 0.000002 secs', 'PT0.000002S')), ((0, 0, 0, 0, -3, 0, 0), ('-0:03:00', '-00:03:00', '@ 3 mins ago', 'PT-3M')), ((0, 0, 0, 0, 0, -3, 0), ('-0:00:03', '-00:00:03', '@ 3 secs ago', 'PT-3S')), ((0, 0, 0, 0, 0, 0, -3), ('-0:00:00.000003', '-00:00:00.000003', '@ 0.000003 secs ago', 'PT-0.000003S')), ((1, 2, 0, 0, 0, 0, 0), ('1-2', '1 year 2 mons', '@ 1 year 2 mons', 'P1Y2M')), ((0, 0, 3, 4, 5, 6, 0), ('3 4:05:06', '3 days 04:05:06', '@ 3 days 4 hours 5 mins 6 secs', 'P3DT4H5M6S')), ((1, 2, 3, 4, 5, 6, 0), ('+1-2 +3 +4:05:06', '1 year 2 mons 3 days 04:05:06', '@ 1 year 2 mons 3 days 4 hours 5 mins 6 secs', 'P1Y2M3DT4H5M6S')), ((1, 2, 3, -4, -5, -6, 0), ('+1-2 +3 -4:05:06', '1 year 2 mons 3 days -04:05:06', '@ 1 year 2 mons 3 days -4 hours -5 mins -6 secs', 'P1Y2M3DT-4H-5M-6S')), ((1, 2, 3, -4, 5, 6, 0), ('+1-2 +3 -3:54:54', '1 year 2 mons 3 days -03:54:54', '@ 1 year 2 mons 3 days -3 hours -54 mins -54 secs', 'P1Y2M3DT-3H-54M-54S')), ((-1, -2, 3, -4, -5, -6, 0), ('-1-2 +3 -4:05:06', '-1 years -2 mons +3 days -04:05:06', '@ 1 year 2 mons -3 days 4 hours 5 mins 6 secs ago', 'P-1Y-2M3DT-4H-5M-6S')), ((1, 2, -3, 4, 5, 6, 0), ('+1-2 -3 +4:05:06', '1 year 2 mons -3 days +04:05:06', '@ 1 year 2 mons -3 days 4 hours 5 mins 6 secs', 'P1Y2M-3DT4H5M6S')), ((0, 0, 0, 1, 30, 0, 0), ('1:30:00', '01:30:00', '@ 1 hour 30 mins', 'PT1H30M')), ((0, 0, 0, 3, 15, 45, 123456), ('3:15:45.123456', '03:15:45.123456', '@ 3 hours 15 mins 45.123456 secs', 'PT3H15M45.123456S')), ((0, 0, 0, 3, 15, -5, 123), ('3:14:55.000123', '03:14:55.000123', '@ 3 hours 14 mins 55.000123 secs', 
'PT3H14M55.000123S')), ((0, 0, 0, 3, -5, 15, -12345), ('2:55:14.987655', '02:55:14.987655', '@ 2 hours 55 mins 14.987655 secs', 'PT2H55M14.987655S')), ((0, 0, 0, 2, -1, 0, 0), ('1:59:00', '01:59:00', '@ 1 hour 59 mins', 'PT1H59M')), ((0, 0, 0, -1, 2, 0, 0), ('-0:58:00', '-00:58:00', '@ 58 mins ago', 'PT-58M')), ((1, 11, 0, 0, 0, 0, 0), ('1-11', '1 year 11 mons', '@ 1 year 11 mons', 'P1Y11M')), ((0, -10, 0, 0, 0, 0, 0), ('-0-10', '-10 mons', '@ 10 mons ago', 'P-10M')), ((0, 0, 2, -1, 0, 0, 0), ('+0-0 +2 -1:00:00', '2 days -01:00:00', '@ 2 days -1 hours', 'P2DT-1H')), ((0, 0, -1, 2, 0, 0, 0), ('+0-0 -1 +2:00:00', '-1 days +02:00:00', '@ 1 day -2 hours ago', 'P-1DT2H')), ((0, 0, 1, 0, 0, 0, 1), ('1 0:00:00.000001', '1 day 00:00:00.000001', '@ 1 day 0.000001 secs', 'P1DT0.000001S')), ((0, 0, 1, 0, 0, 1, 0), ('1 0:00:01', '1 day 00:00:01', '@ 1 day 1 sec', 'P1DT1S')), ((0, 0, 1, 0, 1, 0, 0), ('1 0:01:00', '1 day 00:01:00', '@ 1 day 1 min', 'P1DT1M')), ((0, 0, 0, 0, 1, 0, -1), ('0:00:59.999999', '00:00:59.999999', '@ 59.999999 secs', 'PT59.999999S')), ((0, 0, 0, 0, -1, 0, 1), ('-0:00:59.999999', '-00:00:59.999999', '@ 59.999999 secs ago', 'PT-59.999999S')), ((0, 0, 0, 0, -1, 1, 1), ('-0:00:58.999999', '-00:00:58.999999', '@ 58.999999 secs ago', 'PT-58.999999S')), ((0, 0, 42, 0, 0, 0, 0), ('42 0:00:00', '42 days', '@ 42 days', 'P42D')), ((0, 0, -7, 0, 0, 0, 0), ('-7 0:00:00', '-7 days', '@ 7 days ago', 'P-7D')), ((1, 1, 1, 1, 1, 0, 0), ('+1-1 +1 +1:01:00', '1 year 1 mon 1 day 01:01:00', '@ 1 year 1 mon 1 day 1 hour 1 min', 'P1Y1M1DT1H1M')), ((0, -11, -1, -1, 1, 0, 0), ('-0-11 -1 -0:59:00', '-11 mons -1 days -00:59:00', '@ 11 mons 1 day 59 mins ago', 'P-11M-1DT-59M')), ((-1, -1, -1, -1, -1, 0, 0), ('-1-1 -1 -1:01:00', '-1 years -1 mons -1 days -01:01:00', '@ 1 year 1 mon 1 day 1 hour 1 min ago', 'P-1Y-1M-1DT-1H-1M')), ((-1, 0, -3, 1, 0, 0, 0), ('-1-0 -3 +1:00:00', '-1 years -3 days +01:00:00', '@ 1 year 3 days -1 hours ago', 'P-1Y-3DT1H')), ((1, 0, 0, 0, 0, 0, 1), ('+1-0 +0 +0:00:00.000001', '1 year 00:00:00.000001', '@ 1 year 0.000001 secs', 'P1YT0.000001S')), ((1, 0, 0, 0, 0, 0, -1), ('+1-0 +0 -0:00:00.000001', '1 year -00:00:00.000001', '@ 1 year -0.000001 secs', 'P1YT-0.000001S')), ((1, 2, 3, 4, 5, 6, 7), ('+1-2 +3 +4:05:06.000007', '1 year 2 mons 3 days 04:05:06.000007', '@ 1 year 2 mons 3 days 4 hours 5 mins 6.000007 secs', 'P1Y2M3DT4H5M6.000007S')), ((0, 10, 3, -4, 5, -6, 7), ('+0-10 +3 -3:55:05.999993', '10 mons 3 days -03:55:05.999993', '@ 10 mons 3 days -3 hours -55 mins -5.999993 secs', 'P10M3DT-3H-55M-5.999993S')), ((0, -10, -3, 4, -5, 6, -7), ('-0-10 -3 +3:55:05.999993', '-10 mons -3 days +03:55:05.999993', '@ 10 mons 3 days -3 hours -55 mins -5.999993 secs ago', 'P-10M-3DT3H55M5.999993S'))] def test_cast_interval(self): from pg.cast import cast_interval for result, values in self.intervals: years, mons, days, hours, mins, secs, usecs = result days += 365 * years + 30 * mons interval = timedelta( days=days, hours=hours, minutes=mins, seconds=secs, microseconds=usecs) for value in values: self.assertEqual(cast_interval(value), interval) class TestEscapeFunctions(unittest.TestCase): """Test pg escape and unescape functions. The libpq interface memorizes some parameters of the last opened connection that influence the result of these functions. Therefore we cannot do rigid tests of these functions here. We leave this for the test module that runs with a database. 
""" def test_escape_string(self): f = pg.escape_string b = f(b'plain') self.assertIsInstance(b, bytes) self.assertEqual(b, b'plain') s = f('plain') self.assertIsInstance(s, str) self.assertEqual(s, 'plain') s = f("that's cheese") self.assertIsInstance(s, str) self.assertEqual(s, "that''s cheese") def test_escape_bytea(self): f = pg.escape_bytea b = f(b'plain') self.assertIsInstance(b, bytes) self.assertEqual(b, b'plain') s = f('plain') self.assertIsInstance(s, str) self.assertEqual(s, 'plain') s = f("that's cheese") self.assertIsInstance(s, str) self.assertEqual(s, "that''s cheese") def test_unescape_bytea(self): f = pg.unescape_bytea r = f(b'plain') self.assertIsInstance(r, bytes) self.assertEqual(r, b'plain') r = f('plain') self.assertIsInstance(r, bytes) self.assertEqual(r, b'plain') r = f(b"das is' k\\303\\244se") self.assertIsInstance(r, bytes) self.assertEqual(r, "das is' käse".encode()) r = f("das is' k\\303\\244se") self.assertIsInstance(r, bytes) self.assertEqual(r, "das is' käse".encode()) r = f(b'O\\000ps\\377!') self.assertEqual(r, b'O\x00ps\xff!') r = f('O\\000ps\\377!') self.assertEqual(r, b'O\x00ps\xff!') class TestConfigFunctions(unittest.TestCase): """Test the functions for changing default settings. The effect of most of these cannot be tested here, because that needs a database connection. So we merely test their existence here. """ def test_get_datestyle(self): self.assertIsNone(pg.get_datestyle()) def test_set_datestyle(self): datestyle = pg.get_datestyle() try: pg.set_datestyle('ISO, YMD') self.assertEqual(pg.get_datestyle(), 'ISO, YMD') pg.set_datestyle('Postgres, MDY') self.assertEqual(pg.get_datestyle(), 'Postgres, MDY') pg.set_datestyle('Postgres, DMY') self.assertEqual(pg.get_datestyle(), 'Postgres, DMY') pg.set_datestyle('SQL, MDY') self.assertEqual(pg.get_datestyle(), 'SQL, MDY') pg.set_datestyle('SQL, DMY') self.assertEqual(pg.get_datestyle(), 'SQL, DMY') pg.set_datestyle('German, DMY') self.assertEqual(pg.get_datestyle(), 'German, DMY') pg.set_datestyle(None) self.assertIsNone(pg.get_datestyle()) finally: pg.set_datestyle(datestyle) def test_get_decimal_point(self): r = pg.get_decimal_point() self.assertIsInstance(r, str) self.assertEqual(r, '.') def test_set_decimal_point(self): point = pg.get_decimal_point() try: pg.set_decimal_point('*') r = pg.get_decimal_point() self.assertIsInstance(r, str) self.assertEqual(r, '*') finally: pg.set_decimal_point(point) r = pg.get_decimal_point() self.assertIsInstance(r, str) self.assertEqual(r, point) def test_get_decimal(self): r = pg.get_decimal() self.assertIs(r, Decimal) def test_set_decimal(self): decimal_class = Decimal try: pg.set_decimal(int) r = pg.get_decimal() self.assertIs(r, int) finally: pg.set_decimal(decimal_class) r = pg.get_decimal() self.assertIs(r, decimal_class) def test_get_bool(self): r = pg.get_bool() self.assertIsInstance(r, bool) self.assertIs(r, True) def test_set_bool(self): use_bool = pg.get_bool() try: pg.set_bool(False) r = pg.get_bool() pg.set_bool(use_bool) self.assertIsInstance(r, bool) self.assertIs(r, False) pg.set_bool(True) r = pg.get_bool() self.assertIsInstance(r, bool) self.assertIs(r, True) finally: pg.set_bool(use_bool) r = pg.get_bool() self.assertIsInstance(r, bool) self.assertIs(r, use_bool) def test_get_bytea_escaped(self): r = pg.get_bytea_escaped() self.assertIsInstance(r, bool) self.assertIs(r, False) def test_set_bytea_escaped(self): bytea_escaped = pg.get_bytea_escaped() try: pg.set_bytea_escaped(True) r = pg.get_bytea_escaped() pg.set_bytea_escaped(bytea_escaped) 
self.assertIsInstance(r, bool) self.assertIs(r, True) pg.set_bytea_escaped(False) r = pg.get_bytea_escaped() self.assertIsInstance(r, bool) self.assertIs(r, False) finally: pg.set_bytea_escaped(bytea_escaped) r = pg.get_bytea_escaped() self.assertIsInstance(r, bool) self.assertIs(r, bytea_escaped) def test_get_jsondecode(self): r = pg.get_jsondecode() self.assertTrue(callable(r)) self.assertIs(r, json.loads) def test_set_jsondecode(self): jsondecode = pg.get_jsondecode() try: pg.set_jsondecode(None) r = pg.get_jsondecode() self.assertIsNone(r) pg.set_jsondecode(str) r = pg.get_jsondecode() self.assertIs(r, str) self.assertRaises(TypeError, pg.set_jsondecode, 'invalid') finally: pg.set_jsondecode(jsondecode) r = pg.get_jsondecode() self.assertIs(r, jsondecode) class TestModuleConstants(unittest.TestCase): """Test the existence of the documented module constants.""" def test_version(self): v = pg.version self.assertIsInstance(v, str) # make sure the version conforms to PEP440 re_version = r"""^ (\d[\.\d]*(?<= \d)) ((?:[abc]|rc)\d+)? (?:(\.post\d+))? (?:(\.dev\d+))? (?:(\+(?![.])[a-zA-Z0-9\.]*[a-zA-Z0-9]))? $""" match = re.match(re_version, v, re.X) self.assertIsNotNone(match) self.assertEqual(pg.__version__, v) if __name__ == '__main__': unittest.main() PyGreSQL-PyGreSQL-166b135/tests/test_classic_largeobj.py000077500000000000000000000345741450706350600230640ustar00rootroot00000000000000#!/usr/bin/python """Test the classic PyGreSQL interface. Sub-tests for large object support. Contributed by Christoph Zwerschke. These tests need a database to test against. """ import os import tempfile import unittest from contextlib import suppress from typing import Any import pg # the module under test from .config import dbhost, dbname, dbpasswd, dbport, dbuser windows = os.name == 'nt' # noinspection PyArgumentList def connect(): """Create a basic pg connection to the test database.""" connection = pg.connect(dbname, dbhost, dbport, user=dbuser, passwd=dbpasswd) connection.query("set client_min_messages=warning") return connection class TestModuleConstants(unittest.TestCase): """Test the existence of the documented module constants.""" def test_large_object_int_constants(self): names = 'INV_READ INV_WRITE SEEK_SET SEEK_CUR SEEK_END'.split() for name in names: try: value = getattr(pg, name) except AttributeError: self.fail(f'Module constant {name} is missing') self.assertIsInstance(value, int) class TestCreatingLargeObjects(unittest.TestCase): """Test creating large objects using a connection.""" def setUp(self): self.c = connect() self.c.query('begin') def tearDown(self): self.c.query('rollback') self.c.close() def assertIsLargeObject(self, obj): # noqa: N802 self.assertIsNotNone(obj) self.assertTrue(hasattr(obj, 'open')) self.assertTrue(hasattr(obj, 'close')) self.assertTrue(hasattr(obj, 'oid')) self.assertTrue(hasattr(obj, 'pgcnx')) self.assertTrue(hasattr(obj, 'error')) self.assertIsInstance(obj.oid, int) self.assertNotEqual(obj.oid, 0) self.assertIs(obj.pgcnx, self.c) self.assertIsInstance(obj.error, str) self.assertFalse(obj.error) def test_lo_create(self): large_object = self.c.locreate(pg.INV_READ | pg.INV_WRITE) try: self.assertIsLargeObject(large_object) finally: del large_object def test_get_lo(self): large_object = self.c.locreate(pg.INV_READ | pg.INV_WRITE) try: self.assertIsLargeObject(large_object) oid = large_object.oid finally: del large_object data = b'some data to be shared' large_object = self.c.getlo(oid) try: self.assertIsLargeObject(large_object) 
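# Second half of the round trip: getlo() wraps the already existing
# large object by its oid, so data written through one handle can be
# read back through a fresh handle. A rough sketch of the pattern
# exercised below (illustration only, not part of the test):
#
#     lo = c.getlo(oid); lo.open(pg.INV_WRITE); lo.write(data); lo.close()
#     lo = c.getlo(oid); lo.open(pg.INV_READ); data = lo.read(80); lo.close()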
self.assertEqual(large_object.oid, oid) large_object.open(pg.INV_WRITE) large_object.write(data) large_object.close() finally: del large_object large_object = self.c.getlo(oid) try: self.assertIsLargeObject(large_object) self.assertEqual(large_object.oid, oid) large_object.open(pg.INV_READ) r = large_object.read(80) large_object.close() large_object.unlink() finally: del large_object self.assertIsInstance(r, bytes) self.assertEqual(r, data) def test_lo_import(self): f : Any if windows: # NamedTemporaryFiles don't work well here fname = 'temp_test_pg_largeobj_import.txt' f = open(fname, 'wb') # noqa: SIM115 else: f = tempfile.NamedTemporaryFile() fname = f.name data = b'some data to be imported' f.write(data) if windows: f.close() f = open(fname, 'rb') # noqa: SIM115 else: f.flush() f.seek(0) large_object = self.c.loimport(f.name) try: f.close() if windows: os.remove(fname) self.assertIsLargeObject(large_object) large_object.open(pg.INV_READ) large_object.seek(0, pg.SEEK_SET) r = large_object.size() self.assertIsInstance(r, int) self.assertEqual(r, len(data)) r = large_object.read(80) self.assertIsInstance(r, bytes) self.assertEqual(r, data) large_object.close() large_object.unlink() finally: del large_object class TestLargeObjects(unittest.TestCase): """Test the large object methods.""" def setUp(self): self.pgcnx = connect() self.pgcnx.query('begin') self.obj = self.pgcnx.locreate(pg.INV_READ | pg.INV_WRITE) def tearDown(self): if self.obj.oid: with suppress(SystemError, OSError): self.obj.close() with suppress(SystemError, OSError): self.obj.unlink() del self.obj with suppress(SystemError): self.pgcnx.query('rollback') self.pgcnx.close() def test_class_name(self): self.assertEqual(self.obj.__class__.__name__, 'LargeObject') def test_module_name(self): self.assertEqual(self.obj.__class__.__module__, 'pg') def test_oid(self): self.assertIsInstance(self.obj.oid, int) self.assertNotEqual(self.obj.oid, 0) def test_pgcn(self): self.assertIs(self.obj.pgcnx, self.pgcnx) def test_error(self): self.assertIsInstance(self.obj.error, str) self.assertEqual(self.obj.error, '') def test_str(self): self.obj.open(pg.INV_WRITE) data = b'some object to be printed' self.obj.write(data) oid = self.obj.oid r = str(self.obj) self.assertEqual(r, f'Opened large object, oid {oid}') self.obj.close() r = str(self.obj) self.assertEqual(r, f'Closed large object, oid {oid}') def test_repr(self): r = repr(self.obj) self.assertTrue(r.startswith('= len(self.sent): return True sleep(0.01) def receive(self, stop=False): if not self.sent: stop = True if stop: self.notify_handler(stop=True, payload='stop') self.assertTrue(self.wait()) self.assertFalse(self.timeout) self.assertEqual(self.received, self.sent) self.received = [] self.sent = [] self.assertEqual(self.handler.listening, not self.stopped) def test_notify_handler_empty(self): self.start_handler() self.notify_handler(stop=True) self.assertEqual(len(self.sent), 1) self.receive() def test_notify_query_empty(self): self.start_handler() self.notify_query(stop=True) self.assertEqual(len(self.sent), 1) self.receive() def test_notify_handler_once(self): self.start_handler() self.notify_handler() self.assertEqual(len(self.sent), 1) self.receive() self.receive(stop=True) def test_notify_query_once(self): self.start_handler() self.notify_query() self.receive() self.notify_query(stop=True) self.receive() def test_notify_with_args(self): arg_dict = {'test': 42, 'more': 43, 'less': 41} self.start_handler('test_args', arg_dict) self.notify_query() self.receive(stop=True) def 
test_notify_several_times(self): arg_dict = {'test': 1} self.start_handler(arg_dict=arg_dict) for _n in range(3): self.notify_query() self.receive() arg_dict['test'] += 1 for _n in range(2): self.notify_handler() self.receive() arg_dict['test'] += 1 for _n in range(3): self.notify_query() self.receive(stop=True) def test_notify_once_with_payload(self): self.start_handler() self.notify_query(payload='test_payload') self.receive(stop=True) def test_notify_with_args_and_payload(self): self.start_handler(arg_dict={'foo': 'bar'}) self.notify_query(payload='baz') self.receive(stop=True) def test_notify_quoted_names(self): self.start_handler('Hello, World!') self.notify_query(payload='How do you do?') self.receive(stop=True) def test_notify_with_five_payloads(self): self.start_handler('gimme_5', {'test': 'Gimme 5'}) for n in range(5): self.notify_query(payload=f"Round {n}") self.assertEqual(len(self.sent), 5) self.receive(stop=True) def test_receive_immediately(self): self.start_handler('immediate', {'test': 'immediate'}) for n in range(3): self.notify_query(payload=f"Round {n}") self.receive() self.receive(stop=True) def test_notify_distinct_in_transaction(self): self.start_handler('test_transaction', {'transaction': True}) self.db.begin() for n in range(3): self.notify_query(payload=f'Round {n}') self.db.commit() self.receive(stop=True) def test_notify_same_in_transaction(self): self.start_handler('test_transaction', {'transaction': True}) self.db.begin() for _n in range(3): self.notify_query() self.db.commit() # these same notifications may be delivered as one, # so we must not wait for all three to appear self.sent = self.sent[:1] self.receive(stop=True) def test_notify_no_timeout(self): # noinspection PyTypeChecker self.start_handler(timeout=None) self.assertIsNone(self.handler.timeout) self.assertTrue(self.handler.listening) sleep(0.02) self.assertFalse(self.timeout) self.receive(stop=True) def test_notify_zero_timeout(self): self.start_handler(timeout=0) self.assertEqual(self.handler.timeout, 0) self.assertTrue(self.handler.listening) self.assertFalse(self.timeout) def test_notify_without_timeout(self): self.start_handler(timeout=1) self.assertEqual(self.handler.timeout, 1) sleep(0.02) self.assertFalse(self.timeout) self.receive(stop=True) def test_notify_with_timeout(self): # noinspection PyTypeChecker self.start_handler(timeout=0.01) sleep(0.02) self.assertTrue(self.timeout) if __name__ == '__main__': unittest.main() PyGreSQL-PyGreSQL-166b135/tests/test_dbapi20.py000077500000000000000000001600761450706350600210140ustar00rootroot00000000000000#!/usr/bin/python from __future__ import annotations import gc import unittest from datetime import date, datetime, time, timedelta, timezone from decimal import Decimal from typing import Any, ClassVar from uuid import UUID as Uuid # noqa: N811 import pgdb from . 
import dbapi20 from .config import dbhost, dbname, dbpasswd, dbport, dbuser class PgBitString: """Test object with a PostgreSQL representation as Bit String.""" def __init__(self, value): self.value = value def __pg_repr__(self): return f"B'{self.value:b}'" class TestPgDb(dbapi20.DatabaseAPI20Test): driver = pgdb connect_args = () connect_kw_args: ClassVar[dict[str, Any]] = { 'database': dbname, 'host': f"{dbhost or ''}:{dbport or -1}", 'user': dbuser, 'password': dbpasswd} lower_func = 'lower' # For stored procedure test def setUp(self): super().setUp() try: con = self._connect() con.close() except pgdb.Error: # try to create a missing database import pg try: # first try to log in as superuser db = pg.DB('postgres', dbhost or None, dbport or -1, user='postgres') except Exception: # then try to log in as current user db = pg.DB('postgres', dbhost or None, dbport or -1) db.query('create database ' + dbname) def tearDown(self): super().tearDown() def test_version(self): v = pgdb.version self.assertIsInstance(v, str) self.assertIn('.', v) self.assertEqual(pgdb.__version__, v) def test_connect_kwargs(self): application_name = 'PyGreSQL DB API 2.0 Test' self.connect_kw_args['application_name'] = application_name con = self._connect() cur = con.cursor() cur.execute("select application_name from pg_stat_activity" " where application_name = %s", (application_name,)) self.assertEqual(cur.fetchone(), (application_name,)) def test_connect_kwargs_with_special_chars(self): special_name = 'Single \' and double " quote and \\ backslash!' self.connect_kw_args['application_name'] = special_name con = self._connect() cur = con.cursor() cur.execute("select application_name from pg_stat_activity" " where application_name = %s", (special_name,)) self.assertEqual(cur.fetchone(), (special_name,)) def test_percent_sign(self): con = self._connect() cur = con.cursor() cur.execute("select %s, 'a %% sign'", ('a % sign',)) self.assertEqual(cur.fetchone(), ('a % sign', 'a % sign')) cur.execute("select 'a % sign'") self.assertEqual(cur.fetchone(), ('a % sign',)) cur.execute("select 'a %% sign'") self.assertEqual(cur.fetchone(), ('a % sign',)) def test_paramstyles(self): self.assertEqual(pgdb.paramstyle, 'pyformat') con = self._connect() cur = con.cursor() # parameters can be passed as tuple cur.execute("select %s, %s, %s", (123, 'abc', True)) self.assertEqual(cur.fetchone(), (123, 'abc', True)) # parameters can be passed as dict cur.execute("select %(one)s, %(two)s, %(one)s, %(three)s", { "one": 123, "two": "abc", "three": True }) self.assertEqual(cur.fetchone(), (123, 'abc', 123, True)) def test_callproc_no_params(self): con = self._connect() cur = con.cursor() # note that now() does not change within a transaction cur.execute('select now()') now = cur.fetchone()[0] res = cur.callproc('now') self.assertIsNone(res) res = cur.fetchone()[0] self.assertEqual(res, now) def test_callproc_bad_params(self): con = self._connect() cur = con.cursor() self.assertRaises(TypeError, cur.callproc, 'lower', 42) self.assertRaises(pgdb.ProgrammingError, cur.callproc, 'lower', (42,)) def test_callproc_one_param(self): con = self._connect() cur = con.cursor() params = (42.4382,) res = cur.callproc("round", params) self.assertIs(res, params) res = cur.fetchone()[0] self.assertEqual(res, 42) def test_callproc_two_params(self): con = self._connect() cur = con.cursor() params = (9, 4) res = cur.callproc("div", params) self.assertIs(res, params) res = cur.fetchone()[0] self.assertEqual(res, 2) def test_cursor_type(self): class 
TestCursor(pgdb.Cursor): @staticmethod def row_factory(row): return row # not used con = self._connect() self.assertIs(con.cursor_type, pgdb.Cursor) cur = con.cursor() self.assertIsInstance(cur, pgdb.Cursor) self.assertNotIsInstance(cur, TestCursor) con.cursor_type = TestCursor cur = con.cursor() self.assertIsInstance(cur, TestCursor) cur = con.cursor() self.assertIsInstance(cur, TestCursor) con = self._connect() self.assertIs(con.cursor_type, pgdb.Cursor) cur = con.cursor() self.assertIsInstance(cur, pgdb.Cursor) self.assertNotIsInstance(cur, TestCursor) def test_row_factory(self): class TestCursor(pgdb.Cursor): def row_factory(self, row): description = self.description assert isinstance(description, list) return {f'column {desc[0]}': value for desc, value in zip(description, row)} con = self._connect() con.cursor_type = TestCursor cur = con.cursor() self.assertIsInstance(cur, TestCursor) res = cur.execute("select 1 as a, 2 as b") self.assertIs(res, cur, 'execute() should return cursor') res = cur.fetchone() self.assertIsInstance(res, dict) self.assertEqual(res, {'column a': 1, 'column b': 2}) cur.execute("select 1 as a, 2 as b union select 3, 4 order by 1") res = cur.fetchall() self.assertIsInstance(res, list) self.assertEqual(len(res), 2) self.assertIsInstance(res[0], dict) self.assertEqual(res[0], {'column a': 1, 'column b': 2}) self.assertIsInstance(res[1], dict) self.assertEqual(res[1], {'column a': 3, 'column b': 4}) def test_build_row_factory(self): # noinspection PyAbstractClass class TestCursor(pgdb.Cursor): def build_row_factory(self): description = self.description assert isinstance(description, list) keys = [desc[0] for desc in description] return lambda row: { key: value for key, value in zip(keys, row)} con = self._connect() con.cursor_type = TestCursor cur = con.cursor() self.assertIsInstance(cur, TestCursor) cur.execute("select 1 as a, 2 as b") res = cur.fetchone() self.assertIsInstance(res, dict) self.assertEqual(res, {'a': 1, 'b': 2}) cur.execute("select 1 as a, 2 as b union select 3, 4 order by 1") res = cur.fetchall() self.assertIsInstance(res, list) self.assertEqual(len(res), 2) self.assertIsInstance(res[0], dict) self.assertEqual(res[0], {'a': 1, 'b': 2}) self.assertIsInstance(res[1], dict) self.assertEqual(res[1], {'a': 3, 'b': 4}) # noinspection PyUnresolvedReferences def test_cursor_with_named_columns(self): con = self._connect() cur = con.cursor() res = cur.execute("select 1 as abc, 2 as de, 3 as f") self.assertIs(res, cur, 'execute() should return cursor') res = cur.fetchone() self.assertIsInstance(res, tuple) self.assertEqual(res, (1, 2, 3)) self.assertEqual(res._fields, ('abc', 'de', 'f')) self.assertEqual(res.abc, 1) self.assertEqual(res.de, 2) self.assertEqual(res.f, 3) cur.execute("select 1 as one, 2 as two union select 3, 4 order by 1") res = cur.fetchall() self.assertIsInstance(res, list) self.assertEqual(len(res), 2) self.assertIsInstance(res[0], tuple) self.assertEqual(res[0], (1, 2)) self.assertEqual(res[0]._fields, ('one', 'two')) self.assertIsInstance(res[1], tuple) self.assertEqual(res[1], (3, 4)) self.assertEqual(res[1]._fields, ('one', 'two')) # noinspection PyUnresolvedReferences def test_cursor_with_unnamed_columns(self): con = self._connect() cur = con.cursor() cur.execute("select 1, 2, 3") res = cur.fetchone() self.assertIsInstance(res, tuple) self.assertEqual(res, (1, 2, 3)) self.assertEqual(res._fields, ('_0', '_1', '_2')) cur.execute("select 1 as one, 2, 3 as three") res = cur.fetchone() self.assertIsInstance(res, tuple) 
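# columns without a usable name are given positional field names of
# the form _0, _1, ... in the named tuple built by the row factory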
self.assertEqual(res, (1, 2, 3)) self.assertEqual(res._fields, ('one', '_1', 'three')) # noinspection PyUnresolvedReferences def test_cursor_with_badly_named_columns(self): con = self._connect() cur = con.cursor() cur.execute("select 1 as abc, 2 as def") res = cur.fetchone() self.assertIsInstance(res, tuple) self.assertEqual(res, (1, 2)) self.assertEqual(res._fields, ('abc', '_1')) cur.execute( 'select 1 as snake_case, 2 as "CamelCase",' ' 3 as "kebap-case", 4 as "_bad", 5 as "0bad", 6 as "bad$"') res = cur.fetchone() self.assertIsInstance(res, tuple) self.assertEqual(res, (1, 2, 3, 4, 5, 6)) self.assertEqual(res._fields[:2], ('snake_case', 'CamelCase')) fields = ('_2', '_3', '_4', '_5') self.assertEqual(res._fields[2:], fields) def test_colnames(self): con = self._connect() cur = con.cursor() cur.execute("select 1, 2, 3") names = cur.colnames self.assertIsInstance(names, list) self.assertEqual(names, ['?column?', '?column?', '?column?']) cur.execute("select 1 as a, 2 as bc, 3 as def, 4 as g") names = cur.colnames self.assertIsInstance(names, list) self.assertEqual(names, ['a', 'bc', 'def', 'g']) def test_coltypes(self): con = self._connect() cur = con.cursor() cur.execute("select 1::int2, 2::int4, 3::int8") types = cur.coltypes self.assertIsInstance(types, list) self.assertEqual(types, ['int2', 'int4', 'int8']) # noinspection PyUnresolvedReferences def test_description_fields(self): con = self._connect() cur = con.cursor() cur.execute("select 123456789::int8 col0," " 123456.789::numeric(41, 13) as col1," " 'foobar'::char(39) as col2") desc = cur.description self.assertIsInstance(desc, list) self.assertEqual(len(desc), 3) cols = [('int8', 8, None), ('numeric', 41, 13), ('bpchar', 39, None)] for i in range(3): c, d = cols[i], desc[i] self.assertIsInstance(d, tuple) self.assertEqual(len(d), 7) self.assertIsInstance(d.name, str) self.assertEqual(d.name, f'col{i}') self.assertIsInstance(d.type_code, str) self.assertEqual(d.type_code, c[0]) self.assertIsNone(d.display_size) self.assertIsInstance(d.internal_size, int) self.assertEqual(d.internal_size, c[1]) if c[2] is not None: self.assertIsInstance(d.precision, int) self.assertEqual(d.precision, c[1]) self.assertIsInstance(d.scale, int) self.assertEqual(d.scale, c[2]) else: self.assertIsNone(d.precision) self.assertIsNone(d.scale) self.assertIsNone(d.null_ok) def test_type_cache_info(self): con = self._connect() try: cur = con.cursor() type_cache = con.type_cache self.assertNotIn('numeric', type_cache) type_info = type_cache['numeric'] self.assertIn('numeric', type_cache) self.assertEqual(type_info, 'numeric') self.assertEqual(type_info.oid, 1700) self.assertEqual(type_info.len, -1) self.assertEqual(type_info.type, 'b') # base self.assertEqual(type_info.category, 'N') # numeric self.assertEqual(type_info.delim, ',') self.assertEqual(type_info.relid, 0) self.assertIs(con.type_cache[1700], type_info) self.assertNotIn('pg_type', type_cache) type_info = type_cache['pg_type'] self.assertIn('pg_type', type_cache) self.assertEqual(type_info.type, 'c') # composite self.assertEqual(type_info.category, 'C') # composite cols = type_cache.get_fields('pg_type') if cols[0].name == 'oid': # PostgreSQL < 12 del cols[0] self.assertEqual(cols[0].name, 'typname') typname = type_cache[cols[0].type] self.assertEqual(typname, 'name') self.assertEqual(typname.type, 'b') # base self.assertEqual(typname.category, 'S') # string self.assertEqual(cols[3].name, 'typlen') typlen = type_cache[cols[3].type] self.assertEqual(typlen, 'int2') self.assertEqual(typlen.type, 'b') # 
base self.assertEqual(typlen.category, 'N') # numeric cur.close() cur = con.cursor() type_cache = con.type_cache self.assertIn('numeric', type_cache) cur.close() finally: con.close() con = self._connect() try: cur = con.cursor() type_cache = con.type_cache self.assertNotIn('pg_type', type_cache) self.assertEqual(type_cache.get('pg_type'), type_info) self.assertIn('pg_type', type_cache) self.assertIsNone(type_cache.get( self.table_prefix + '_surely_does_not_exist')) cur.close() finally: con.close() def test_type_cache_typecast(self): con = self._connect() try: cur = con.cursor() type_cache = con.type_cache self.assertIs(type_cache.get_typecast('int4'), int) cast_int = lambda v: f'int({v})' # noqa: E731 type_cache.set_typecast('int4', cast_int) query = 'select 2::int2, 4::int4, 8::int8' cur.execute(query) i2, i4, i8 = cur.fetchone() self.assertEqual(i2, 2) self.assertEqual(i4, 'int(4)') self.assertEqual(i8, 8) self.assertEqual(type_cache.typecast(42, 'int4'), 'int(42)') type_cache.set_typecast(['int2', 'int8'], cast_int) cur.execute(query) i2, i4, i8 = cur.fetchone() self.assertEqual(i2, 'int(2)') self.assertEqual(i4, 'int(4)') self.assertEqual(i8, 'int(8)') type_cache.reset_typecast('int4') cur.execute(query) i2, i4, i8 = cur.fetchone() self.assertEqual(i2, 'int(2)') self.assertEqual(i4, 4) self.assertEqual(i8, 'int(8)') type_cache.reset_typecast(['int2', 'int8']) cur.execute(query) i2, i4, i8 = cur.fetchone() self.assertEqual(i2, 2) self.assertEqual(i4, 4) self.assertEqual(i8, 8) type_cache.set_typecast(['int2', 'int8'], cast_int) cur.execute(query) i2, i4, i8 = cur.fetchone() self.assertEqual(i2, 'int(2)') self.assertEqual(i4, 4) self.assertEqual(i8, 'int(8)') type_cache.reset_typecast() cur.execute(query) i2, i4, i8 = cur.fetchone() self.assertEqual(i2, 2) self.assertEqual(i4, 4) self.assertEqual(i8, 8) cur.close() finally: con.close() def test_cursor_iteration(self): con = self._connect() cur = con.cursor() cur.execute("select 1 union select 2 union select 3 order by 1") self.assertEqual([r[0] for r in cur], [1, 2, 3]) def test_cursor_invalidation(self): con = self._connect() cur = con.cursor() cur.execute("select 1 union select 2") self.assertEqual(cur.fetchone(), (1,)) self.assertFalse(con.closed) con.close() self.assertTrue(con.closed) self.assertRaises(pgdb.OperationalError, cur.fetchone) def test_fetch_2_rows(self): values = ('test', pgdb.Binary(b'\xff\x52\xb2'), True, 5, 6, 5.7, Decimal('234.234234'), Decimal('75.45'), pgdb.Date(2011, 7, 17), pgdb.Time(15, 47, 42), pgdb.Timestamp(2008, 10, 20, 15, 25, 35), pgdb.Interval(15, 31, 5), 7897234) table = self.table_prefix + 'booze' con = self._connect() try: cur = con.cursor() cur.execute("set datestyle to iso") cur.execute( f"create table {table} (" "stringtest varchar," "binarytest bytea," "booltest bool," "integertest int4," "longtest int8," "floattest float8," "numerictest numeric," "moneytest money," "datetest date," "timetest time," "datetimetest timestamp," "intervaltest interval," "rowidtest oid)") cur.execute("set standard_conforming_strings to on") for s in ('numeric', 'monetary', 'time'): cur.execute(f"set lc_{s} to 'C'") for _i in range(2): cur.execute( f"insert into {table} values (" "%s,%s,%s,%s,%s,%s,%s," "'%s'::money,%s,%s,%s,%s,%s)", values) cur.execute(f"select * from {table}") rows = cur.fetchall() self.assertEqual(len(rows), 2) row0 = rows[0] self.assertEqual(row0, values) self.assertEqual(row0, rows[1]) self.assertIsInstance(row0[0], str) self.assertIsInstance(row0[1], bytes) self.assertIsInstance(row0[2], bool) 
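# the remaining columns exercise the numeric, date/time and interval
# typecasts; each value must come back as the matching Python type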
self.assertIsInstance(row0[3], int) self.assertIsInstance(row0[4], int) self.assertIsInstance(row0[5], float) self.assertIsInstance(row0[6], Decimal) self.assertIsInstance(row0[7], Decimal) self.assertIsInstance(row0[8], date) self.assertIsInstance(row0[9], time) self.assertIsInstance(row0[10], datetime) self.assertIsInstance(row0[11], timedelta) finally: con.close() def test_integrity_error(self): table = self.table_prefix + 'booze' con = self._connect() try: cur = con.cursor() cur.execute("set client_min_messages = warning") cur.execute(f"create table {table} (i int primary key)") cur.execute(f"insert into {table} values (1)") cur.execute(f"insert into {table} values (2)") self.assertRaises( pgdb.IntegrityError, cur.execute, f"insert into {table} values (1)") finally: con.close() def test_update_rowcount(self): table = self.table_prefix + 'booze' con = self._connect() try: cur = con.cursor() cur.execute(f"create table {table} (i int)") cur.execute(f"insert into {table} values (1)") cur.execute(f"update {table} set i=2 where i=2 returning i") self.assertEqual(cur.rowcount, 0) cur.execute(f"update {table} set i=2 where i=1 returning i") self.assertEqual(cur.rowcount, 1) cur.close() # keep rowcount even if cursor is closed (needed by SQLAlchemy) self.assertEqual(cur.rowcount, 1) finally: con.close() def test_sqlstate(self): con = self._connect() cur = con.cursor() try: cur.execute("select 1/0") except pgdb.DatabaseError as error: self.assertIsInstance(error, pgdb.DataError) # the SQLSTATE error code for division by zero is 22012 # noinspection PyUnresolvedReferences self.assertEqual(error.sqlstate, '22012') def test_float(self): nan, inf = float('nan'), float('inf') from math import isinf, isnan self.assertTrue(isnan(nan) and not isinf(nan)) self.assertTrue(isinf(inf) and not isnan(inf)) values = [0, 1, 0.03125, -42.53125, nan, inf, -inf, 'nan', 'inf', '-inf', 'NaN', 'Infinity', '-Infinity'] table = self.table_prefix + 'booze' con = self._connect() try: cur = con.cursor() cur.execute( f"create table {table} (n smallint, floattest float)") params = enumerate(values) cur.executemany(f"insert into {table} values (%d,%s)", params) cur.execute(f"select floattest from {table} order by n") rows = cur.fetchall() self.assertEqual(cur.description[0].type_code, pgdb.FLOAT) self.assertNotEqual(cur.description[0].type_code, pgdb.ARRAY) self.assertNotEqual(cur.description[0].type_code, pgdb.RECORD) finally: con.close() self.assertEqual(len(rows), len(values)) rows = [row[0] for row in rows] for inval, outval in zip(values, rows): if inval in ('inf', 'Infinity'): inval = inf elif inval in ('-inf', '-Infinity'): inval = -inf elif inval in ('nan', 'NaN'): inval = nan if isinf(inval): # type: ignore self.assertTrue(isinf(outval)) if inval < 0: # type: ignore self.assertLess(outval, 0) else: self.assertGreater(outval, 0) elif isnan(inval): # type: ignore self.assertTrue(isnan(outval)) else: self.assertEqual(inval, outval) def test_datetime(self): dt = datetime(2011, 7, 17, 15, 47, 42, 317509) values = [dt.date(), dt.time(), dt, dt.time(), dt] self.assertIsInstance(values[3], time) assert isinstance(values[3], time) # type guard values[3] = values[3].replace(tzinfo=timezone.utc) self.assertIsInstance(values[4], datetime) assert isinstance(values[4], datetime) # type guard values[4] = values[4].replace(tzinfo=timezone.utc) da = (dt.year, dt.month, dt.day) ti = (dt.hour, dt.minute, dt.second, dt.microsecond) tz = (timezone.utc,) inputs = [ # input as objects values, # input as text [v.isoformat() for v in 
values], # type: ignore # # input using type helpers [pgdb.Date(*da), pgdb.Time(*ti), pgdb.Timestamp(*(da + ti)), pgdb.Time(*(ti + tz)), pgdb.Timestamp(*(da + ti + tz))] ] table = self.table_prefix + 'booze' con: pgdb.Connection = self._connect() try: cur = con.cursor() cur.execute("set timezone = UTC") cur.execute(f"create table {table} (" "d date, t time, ts timestamp," "tz timetz, tsz timestamptz)") for params in inputs: for datestyle in ('iso', 'postgres, mdy', 'postgres, dmy', 'sql, mdy', 'sql, dmy', 'german'): cur.execute(f"set datestyle to {datestyle}") if not isinstance(params[0], str): cur.execute("select %s,%s,%s,%s,%s", params) row = cur.fetchone() self.assertEqual(row, tuple(values)) cur.execute( f"insert into {table}" " values (%s,%s,%s,%s,%s)", params) cur.execute(f"select * from {table}") d = cur.description self.assertIsInstance(d, list) assert d is not None # type guard for i in range(5): tc = d[i].type_code self.assertEqual(tc, pgdb.DATETIME) self.assertNotEqual(tc, pgdb.STRING) self.assertNotEqual(tc, pgdb.ARRAY) self.assertNotEqual(tc, pgdb.RECORD) self.assertEqual(d[0].type_code, pgdb.DATE) self.assertEqual(d[1].type_code, pgdb.TIME) self.assertEqual(d[2].type_code, pgdb.TIMESTAMP) self.assertEqual(d[3].type_code, pgdb.TIME) self.assertEqual(d[4].type_code, pgdb.TIMESTAMP) row = cur.fetchone() self.assertEqual(row, tuple(values)) cur.execute(f"truncate table {table}") finally: con.close() def test_interval(self): td = datetime(2011, 7, 17, 15, 47, 42, 317509) - datetime(1970, 1, 1) inputs = [ # input as objects td, # input as text f'{td.days} days {td.seconds} seconds' f' {td.microseconds} microseconds', # input using type helpers pgdb.Interval(td.days, 0, 0, td.seconds, td.microseconds)] table = self.table_prefix + 'booze' con = self._connect() try: cur = con.cursor() cur.execute(f"create table {table} (i interval)") for param in inputs: for intervalstyle in ('sql_standard ', 'postgres', 'postgres_verbose', 'iso_8601'): cur.execute(f"set intervalstyle to {intervalstyle}") # noinspection PyUnboundLocalVariable cur.execute(f"insert into {table} values (%s)", [param]) cur.execute(f"select * from {table}") tc = cur.description[0].type_code self.assertEqual(tc, pgdb.DATETIME) self.assertNotEqual(tc, pgdb.STRING) self.assertNotEqual(tc, pgdb.ARRAY) self.assertNotEqual(tc, pgdb.RECORD) self.assertEqual(tc, pgdb.INTERVAL) row = cur.fetchone() self.assertEqual(row, (td,)) cur.execute(f"truncate table {table}") finally: con.close() def test_hstore(self): con = self._connect() cur = con.cursor() try: cur.execute("select 'k=>v'::hstore") except pgdb.DatabaseError: try: cur.execute("create extension hstore") except pgdb.DatabaseError: self.skipTest("hstore extension not enabled") finally: con.close() d = {'k': 'v', 'foo': 'bar', 'baz': 'whatever', 'back\\': '\\slash', '1a': 'anything at all', '2=b': 'value = 2', '3>c': 'value > 3', '4"c': 'value " 4', "5'c": "value ' 5", 'hello, world': '"hi!"', 'None': None, 'NULL': 'NULL', 'empty': ''} con = self._connect() try: cur = con.cursor() cur.execute("select %s::hstore", (pgdb.Hstore(d),)) result = cur.fetchone()[0] finally: con.close() self.assertIsInstance(result, dict) self.assertEqual(result, d) def test_uuid(self): self.assertIs(Uuid, pgdb.Uuid) d = Uuid('{12345678-1234-5678-1234-567812345678}') con = self._connect() try: cur = con.cursor() cur.execute("select %s::uuid", (d,)) result = cur.fetchone()[0] finally: con.close() self.assertIsInstance(result, Uuid) self.assertEqual(result, d) def test_insert_array(self): values: 
list[tuple[Any, Any]] = [ (None, None), ([], []), ([None], [[None], ['null']]), ([1, 2, 3], [['a', 'b'], ['c', 'd']]), ([20000, 25000, 25000, 30000], [['breakfast', 'consulting'], ['meeting', 'lunch']]), ([0, 1, -1], [['Hello, World!', '"Hi!"'], ['{x,y}', ' x y ']])] table = self.table_prefix + 'booze' con = self._connect() try: cur = con.cursor() cur.execute( f"create table {table} (n smallint, i int[], t text[][])") params = [(n, v[0], v[1]) for n, v in enumerate(values)] # Note that we must use explicit casts because we are inserting # empty arrays. Otherwise this is not necessary. cur.executemany( f"insert into {table} values" " (%d,%s::int[],%s::text[][])", params) cur.execute(f"select i, t from {table} order by n") d = cur.description self.assertEqual(d[0].type_code, pgdb.ARRAY) self.assertNotEqual(d[0].type_code, pgdb.RECORD) self.assertEqual(d[0].type_code, pgdb.NUMBER) self.assertEqual(d[0].type_code, pgdb.INTEGER) self.assertEqual(d[1].type_code, pgdb.ARRAY) self.assertNotEqual(d[1].type_code, pgdb.RECORD) self.assertEqual(d[1].type_code, pgdb.STRING) rows = cur.fetchall() finally: con.close() self.assertEqual(rows, values) def test_select_array(self): values = ([1, 2, 3, None], ['a', 'b', 'c', None]) con = self._connect() try: cur = con.cursor() cur.execute("select %s::int[], %s::text[]", values) row = cur.fetchone() finally: con.close() self.assertEqual(row, values) def test_unicode_list_and_tuple(self): value = ('Käse', 'Würstchen') con = self._connect() try: cur = con.cursor() try: cur.execute("select %s, %s", value) except pgdb.DatabaseError: self.skipTest('database does not support latin-1') row = cur.fetchone() cur.execute("select %s, %s", (list(value), tuple(value))) as_list, as_tuple = cur.fetchone() finally: con.close() self.assertEqual(as_list, list(row)) self.assertEqual(as_tuple, tuple(row)) def test_insert_record(self): values = [('John', 61), ('Jane', 63), ('Fred', None), ('Wilma', None), (None, 42), (None, None)] table = self.table_prefix + 'booze' record = self.table_prefix + 'munch' con = self._connect() cur = con.cursor() try: cur.execute(f"create type {record} as (name varchar, age int)") cur.execute(f"create table {table} (n smallint, r {record})") params = enumerate(values) cur.executemany(f"insert into {table} values (%d,%s)", params) cur.execute(f"select r from {table} order by n") type_code = cur.description[0].type_code self.assertEqual(type_code, record) self.assertEqual(type_code, pgdb.RECORD) self.assertNotEqual(type_code, pgdb.ARRAY) columns = con.type_cache.get_fields(type_code) self.assertEqual(columns[0].name, 'name') self.assertEqual(columns[1].name, 'age') self.assertEqual(con.type_cache[columns[0].type], 'varchar') self.assertEqual(con.type_cache[columns[1].type], 'int4') rows = cur.fetchall() finally: cur.execute(f'drop table {table}') cur.execute(f'drop type {record}') con.close() self.assertEqual(len(rows), len(values)) rows = [row[0] for row in rows] self.assertEqual(rows, values) self.assertEqual(rows[0].name, 'John') self.assertEqual(rows[0].age, 61) def test_select_record(self): value = (1, 25000, 2.5, 'hello', 'Hello World!', 'Hello, World!', '(test)', '(x,y)', ' x y ', 'null', None) con = self._connect() try: cur = con.cursor() cur.execute("select %s as test_record", [value]) self.assertEqual(cur.description[0].name, 'test_record') self.assertEqual(cur.description[0].type_code, 'record') row = cur.fetchone()[0] finally: con.close() # Note that the element types get lost since we created an # untyped record (an anonymous composite type). 
For the same # reason this is also a normal tuple, not a named tuple. text_row = tuple(None if v is None else str(v) for v in value) self.assertEqual(row, text_row) def test_custom_type(self): values = [3, 5, 65] values = list(map(PgBitString, values)) # type: ignore table = self.table_prefix + 'booze' con = self._connect() try: cur = con.cursor() seq_params = enumerate(values) # params have __pg_repr__ method cur.execute( f'create table "{table}" (n smallint, b bit varying(7))') cur.executemany(f"insert into {table} values (%s,%s)", seq_params) cur.execute(f"select * from {table}") rows = cur.fetchall() finally: con.close() self.assertEqual(len(rows), len(values)) con = self._connect() try: cur = con.cursor() params = (1, object()) # an object that cannot be handled self.assertRaises( pgdb.InterfaceError, cur.execute, f"insert into {table} values (%s,%s)", params) finally: con.close() def test_set_decimal_type(self): from pgdb.cast import decimal_type self.assertIs(decimal_type(), Decimal) con = self._connect() try: cur = con.cursor() # change decimal type globally to int class CustomDecimal(str): def __init__(self, value: Any) -> None: self.value = value def __str__(self) -> str: return str(self.value).replace('.', ',') self.assertIs(decimal_type(CustomDecimal), CustomDecimal) cur.execute('select 4.25') self.assertEqual(cur.description[0].type_code, pgdb.NUMBER) value = cur.fetchone()[0] self.assertIsInstance(value, CustomDecimal) self.assertEqual(str(value), '4,25') # change decimal type again to float self.assertIs(decimal_type(float), float) cur.execute('select 4.25') self.assertEqual(cur.description[0].type_code, pgdb.NUMBER) value = cur.fetchone()[0] # the connection still uses the old setting self.assertIsInstance(value, str) self.assertEqual(str(value), '4,25') # bust the cache for type functions for the connection con.type_cache.reset_typecast() cur.execute('select 4.25') self.assertEqual(cur.description[0].type_code, pgdb.NUMBER) value = cur.fetchone()[0] # now the connection uses the new setting self.assertIsInstance(value, float) self.assertEqual(value, 4.25) finally: con.close() decimal_type(Decimal) self.assertIs(decimal_type(), Decimal) def test_global_typecast(self): try: query = 'select 2::int2, 4::int4, 8::int8' self.assertIs(pgdb.get_typecast('int4'), int) cast_int = lambda v: f'int({v})' # noqa: E731 pgdb.set_typecast('int4', cast_int) con = self._connect() try: i2, i4, i8 = con.cursor().execute(query).fetchone() finally: con.close() self.assertEqual(i2, 2) self.assertEqual(i4, 'int(4)') self.assertEqual(i8, 8) pgdb.set_typecast(['int2', 'int8'], cast_int) con = self._connect() try: i2, i4, i8 = con.cursor().execute(query).fetchone() finally: con.close() self.assertEqual(i2, 'int(2)') self.assertEqual(i4, 'int(4)') self.assertEqual(i8, 'int(8)') pgdb.reset_typecast('int4') con = self._connect() try: i2, i4, i8 = con.cursor().execute(query).fetchone() finally: con.close() self.assertEqual(i2, 'int(2)') self.assertEqual(i4, 4) self.assertEqual(i8, 'int(8)') pgdb.reset_typecast(['int2', 'int8']) con = self._connect() try: i2, i4, i8 = con.cursor().execute(query).fetchone() finally: con.close() self.assertEqual(i2, 2) self.assertEqual(i4, 4) self.assertEqual(i8, 8) pgdb.set_typecast(['int2', 'int8'], cast_int) con = self._connect() try: i2, i4, i8 = con.cursor().execute(query).fetchone() finally: con.close() self.assertEqual(i2, 'int(2)') self.assertEqual(i4, 4) self.assertEqual(i8, 'int(8)') finally: pgdb.reset_typecast() con = self._connect() try: i2, i4, i8 = 
con.cursor().execute(query).fetchone() finally: con.close() self.assertEqual(i2, 2) self.assertEqual(i4, 4) self.assertEqual(i8, 8) def test_set_typecast_for_arrays(self): query = 'select ARRAY[1,2,3]' try: con = self._connect() try: r = con.cursor().execute(query).fetchone()[0] finally: con.close() self.assertIsInstance(r, list) self.assertEqual(r, [1, 2, 3]) pgdb.set_typecast('anyarray', lambda v, basecast: v) con = self._connect() try: r = con.cursor().execute(query).fetchone()[0] finally: con.close() self.assertIsInstance(r, str) self.assertEqual(r, '{1,2,3}') finally: pgdb.reset_typecast() con = self._connect() try: r = con.cursor().execute(query).fetchone()[0] finally: con.close() self.assertIsInstance(r, list) self.assertEqual(r, [1, 2, 3]) def test_unicode_with_utf8(self): table = self.table_prefix + 'booze' s = "He wes Leovenaðes sone — liðe him be Drihten" con = self._connect() cur = con.cursor() try: cur.execute(f"create table {table} (t text)") try: cur.execute("set client_encoding=utf8") cur.execute(f"select '{s}'") except Exception: self.skipTest("database does not support utf8") output1 = cur.fetchone()[0] cur.execute(f"insert into {table} values (%s)", (s,)) cur.execute(f"select * from {table}") output2 = cur.fetchone()[0] cur.execute(f"select t = '{s}' from {table}") output3 = cur.fetchone()[0] cur.execute(f"select t = %s from {table}", (s,)) output4 = cur.fetchone()[0] finally: con.close() self.assertIsInstance(output1, str) self.assertEqual(output1, s) self.assertIsInstance(output2, str) self.assertEqual(output2, s) self.assertIsInstance(output3, bool) self.assertTrue(output3) self.assertIsInstance(output4, bool) self.assertTrue(output4) def test_unicode_with_latin1(self): table = self.table_prefix + 'booze' s = "Ehrt den König seine Würde, ehret uns der Hände Fleiß." 
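# the umlauts and the sharp s in this sentence (a Schiller quote,
# roughly: "His dignity honors the king, the diligence of our hands
# honors us") can only be round-tripped with client encoding latin1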
con = self._connect() try: cur = con.cursor() cur.execute(f"create table {table} (t text)") try: cur.execute("set client_encoding=latin1") cur.execute(f"select '{s}'") except Exception: self.skipTest("database does not support latin1") output1 = cur.fetchone()[0] cur.execute(f"insert into {table} values (%s)", (s,)) cur.execute(f"select * from {table}") output2 = cur.fetchone()[0] cur.execute(f"select t = '{s}' from {table}") output3 = cur.fetchone()[0] cur.execute(f"select t = %s from {table}", (s,)) output4 = cur.fetchone()[0] finally: con.close() self.assertIsInstance(output1, str) self.assertEqual(output1, s) self.assertIsInstance(output2, str) self.assertEqual(output2, s) self.assertIsInstance(output3, bool) self.assertTrue(output3) self.assertIsInstance(output4, bool) self.assertTrue(output4) def test_bool(self): values = [False, True, None, 't', 'f', 'true', 'false'] table = self.table_prefix + 'booze' con = self._connect() try: cur = con.cursor() cur.execute(f"create table {table} (n smallint, booltest bool)") params = enumerate(values) cur.executemany(f"insert into {table} values (%s,%s)", params) cur.execute(f"select booltest from {table} order by n") rows = cur.fetchall() self.assertEqual(cur.description[0].type_code, pgdb.BOOL) finally: con.close() rows = [row[0] for row in rows] values[3] = values[5] = True values[4] = values[6] = False self.assertEqual(rows, values) def test_literal(self): con = self._connect() try: cur = con.cursor() value = "lower('Hello')" cur.execute("select %s, %s", (value, pgdb.Literal(value))) row = cur.fetchone() finally: con.close() self.assertEqual(row, (value, 'hello')) def test_json(self): inval = {"employees": [ {"firstName": "John", "lastName": "Doe", "age": 61}]} table = self.table_prefix + 'booze' con = self._connect() try: cur = con.cursor() try: cur.execute(f"create table {table} (jsontest json)") except pgdb.ProgrammingError: self.skipTest('database does not support json') params = (pgdb.Json(inval),) cur.execute(f"insert into {table} values (%s)", params) cur.execute(f"select jsontest from {table}") outval = cur.fetchone()[0] self.assertEqual(cur.description[0].type_code, pgdb.JSON) finally: con.close() self.assertEqual(inval, outval) def test_jsonb(self): inval = {"employees": [ {"firstName": "John", "lastName": "Doe", "age": 61}]} table = self.table_prefix + 'booze' con = self._connect() try: cur = con.cursor() try: cur.execute(f"create table {table} (jsonbtest jsonb)") except pgdb.ProgrammingError: self.skipTest('database does not support jsonb') params = (pgdb.Json(inval),) cur.execute(f"insert into {table} values (%s)", params) cur.execute(f"select jsonbtest from {table}") outval = cur.fetchone()[0] self.assertEqual(cur.description[0].type_code, pgdb.JSON) finally: con.close() self.assertEqual(inval, outval) def test_execute_edge_cases(self): con = self._connect() try: cur = con.cursor() sql = 'invalid' # should be ignored with empty parameter list cur.executemany(sql, []) sql = 'select %d + 1' cur.execute(sql, [(1,), (2,)]) # deprecated use of execute() self.assertEqual(cur.fetchone()[0], 3) sql = 'select 1/0' # cannot be executed self.assertRaises(pgdb.DataError, cur.execute, sql) cur.close() con.rollback() if pgdb.shortcutmethods: res = con.execute('select %d', (1,)).fetchone() self.assertEqual(res, (1,)) res = con.executemany('select %d', [(1,), (2,)]).fetchone() self.assertEqual(res, (2,)) finally: con.close() sql = 'select 1' # cannot be executed after connection is closed self.assertRaises(pgdb.OperationalError, cur.execute, 
sql) def test_fetchall_with_various_sizes(self): # we test this because there are optimizations based on result size con = self._connect() try: for n in (1, 3, 5, 7, 10, 100, 1000): cur = con.cursor() try: cur.execute('select n, n::text as s, n % 2 = 1 as b' f' from generate_series(1, {n}) as s(n)') res = cur.fetchall() self.assertEqual(len(res), n, res) self.assertEqual(len(res[0]), 3) self.assertEqual(res[0].n, 1) self.assertEqual(res[0].s, '1') self.assertIs(res[0].b, True) self.assertEqual(len(res[-1]), 3) self.assertEqual(res[-1].n, n) self.assertEqual(res[-1].s, str(n)) self.assertIs(res[-1].b, n % 2 == 1) finally: cur.close() finally: con.close() def test_fetchmany_with_keep(self): con = self._connect() try: cur = con.cursor() self.assertEqual(cur.arraysize, 1) cur.execute('select * from generate_series(1, 25)') self.assertEqual(len(cur.fetchmany()), 1) self.assertEqual(len(cur.fetchmany()), 1) self.assertEqual(cur.arraysize, 1) cur.arraysize = 3 self.assertEqual(len(cur.fetchmany()), 3) self.assertEqual(len(cur.fetchmany()), 3) self.assertEqual(cur.arraysize, 3) self.assertEqual(len(cur.fetchmany(size=2)), 2) self.assertEqual(cur.arraysize, 3) self.assertEqual(len(cur.fetchmany()), 3) self.assertEqual(len(cur.fetchmany()), 3) self.assertEqual(len(cur.fetchmany(size=2, keep=True)), 2) self.assertEqual(cur.arraysize, 2) self.assertEqual(len(cur.fetchmany()), 2) self.assertEqual(len(cur.fetchmany()), 2) self.assertEqual(len(cur.fetchmany(25)), 3) finally: con.close() def help_nextset_setup(self, _cur): pass # helper not needed def help_nextset_teardown(self, _cur): pass # helper not needed def test_nextset(self): con = self._connect() cur = con.cursor() self.assertRaises(con.NotSupportedError, cur.nextset) def test_setoutputsize(self): pass # not supported def test_connection_errors(self): con = self._connect() self.assertEqual(con.Error, pgdb.Error) self.assertEqual(con.Warning, pgdb.Warning) self.assertEqual(con.InterfaceError, pgdb.InterfaceError) self.assertEqual(con.DatabaseError, pgdb.DatabaseError) self.assertEqual(con.InternalError, pgdb.InternalError) self.assertEqual(con.OperationalError, pgdb.OperationalError) self.assertEqual(con.ProgrammingError, pgdb.ProgrammingError) self.assertEqual(con.IntegrityError, pgdb.IntegrityError) self.assertEqual(con.DataError, pgdb.DataError) self.assertEqual(con.NotSupportedError, pgdb.NotSupportedError) def test_transaction(self): table = self.table_prefix + 'booze' con1 = self._connect() cur1 = con1.cursor() self.execute_ddl1(cur1) con1.commit() con2 = self._connect() cur2 = con2.cursor() cur2.execute(f"select name from {table}") self.assertIsNone(cur2.fetchone()) cur1.execute(f"insert into {table} values('Schlafly')") cur2.execute(f"select name from {table}") self.assertIsNone(cur2.fetchone()) con1.commit() cur2.execute(f"select name from {table}") self.assertEqual(cur2.fetchone(), ('Schlafly',)) con2.close() con1.close() def test_autocommit(self): table = self.table_prefix + 'booze' con1 = self._connect() con1.autocommit = True cur1 = con1.cursor() self.execute_ddl1(cur1) con2 = self._connect() cur2 = con2.cursor() cur2.execute(f"select name from {table}") self.assertIsNone(cur2.fetchone()) cur1.execute(f"insert into {table} values('Shmaltz Pastrami')") cur2.execute(f"select name from {table}") self.assertEqual(cur2.fetchone(), ('Shmaltz Pastrami',)) con2.close() con1.close() def test_connection_as_contextmanager(self): table = self.table_prefix + 'booze' for autocommit in False, True: con = self._connect() con.autocommit = autocommit 
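# the DB-API connection context manager pattern under test, as a
# rough sketch (illustration only):
#
#     with con:             # enters a transaction block
#         cur.execute(...)  # committed if the block exits normally,
#                           # rolled back if the block raises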
try: cur = con.cursor() if autocommit: cur.execute(f"truncate table {table}") else: cur.execute( f"create table {table} (n smallint check(n!=4))") with con: cur.execute(f"insert into {table} values (1)") cur.execute(f"insert into {table} values (2)") try: with con: cur.execute(f"insert into {table} values (3)") cur.execute(f"insert into {table} values (4)") except con.IntegrityError as error: self.assertIn('check', str(error).lower()) with con: cur.execute(f"insert into {table} values (5)") cur.execute(f"insert into {table} values (6)") try: with con: cur.execute(f"insert into {table} values (7)") cur.execute(f"insert into {table} values (8)") raise ValueError('transaction should rollback') except ValueError as error: self.assertEqual(str(error), 'transaction should rollback') with con: cur.execute(f"insert into {table} values (9)") cur.execute(f"select * from {table} order by 1") rows = cur.fetchall() rows = [row[0] for row in rows] finally: con.close() self.assertEqual(rows, [1, 2, 5, 6, 9]) def test_cursor_connection(self): con = self._connect() cur = con.cursor() self.assertEqual(cur.connection, con) cur.close() def test_cursor_as_contextmanager(self): con = self._connect() with con.cursor() as cur: self.assertEqual(cur.connection, con) def test_pgdb_type(self): self.assertEqual(pgdb.STRING, pgdb.STRING) self.assertNotEqual(pgdb.STRING, pgdb.INTEGER) self.assertNotEqual(pgdb.STRING, pgdb.BOOL) self.assertNotEqual(pgdb.BOOL, pgdb.INTEGER) self.assertEqual(pgdb.INTEGER, pgdb.INTEGER) self.assertNotEqual(pgdb.INTEGER, pgdb.NUMBER) self.assertEqual('char', pgdb.STRING) self.assertEqual('varchar', pgdb.STRING) self.assertEqual('text', pgdb.STRING) self.assertNotEqual('numeric', pgdb.STRING) self.assertEqual('numeric', pgdb.NUMERIC) self.assertEqual('numeric', pgdb.NUMBER) self.assertEqual('int4', pgdb.NUMBER) self.assertNotEqual('int4', pgdb.NUMERIC) self.assertEqual('int2', pgdb.SMALLINT) self.assertNotEqual('int4', pgdb.SMALLINT) self.assertEqual('int2', pgdb.INTEGER) self.assertEqual('int4', pgdb.INTEGER) self.assertEqual('int8', pgdb.INTEGER) self.assertNotEqual('int4', pgdb.LONG) self.assertEqual('int8', pgdb.LONG) self.assertIn('char', pgdb.STRING) self.assertLess(pgdb.NUMERIC, pgdb.NUMBER) self.assertGreaterEqual(pgdb.NUMBER, pgdb.INTEGER) self.assertLessEqual(pgdb.TIME, pgdb.DATETIME) self.assertGreaterEqual(pgdb.DATETIME, pgdb.DATE) self.assertEqual(pgdb.ARRAY, pgdb.ARRAY) self.assertNotEqual(pgdb.ARRAY, pgdb.STRING) self.assertEqual('_char', pgdb.ARRAY) self.assertNotEqual('char', pgdb.ARRAY) self.assertEqual(pgdb.RECORD, pgdb.RECORD) self.assertNotEqual(pgdb.RECORD, pgdb.STRING) self.assertNotEqual(pgdb.RECORD, pgdb.ARRAY) self.assertEqual('record', pgdb.RECORD) self.assertNotEqual('_record', pgdb.RECORD) def test_no_close(self): data = ('hello', 'world') con = self._connect() cur = con.cursor() cur.build_row_factory = lambda: tuple cur.execute("select %s, %s", data) row = cur.fetchone() self.assertEqual(row, data) def test_change_row_factory_cache_size(self): from pg import RowCache queries = ['select 1 as a, 2 as b, 3 as c', 'select 123 as abc'] con = self._connect() cur = con.cursor() for maxsize in (None, 0, 1, 2, 3, 10, 1024): RowCache.change_size(maxsize) for _i in range(3): for q in queries: cur.execute(q) r = cur.fetchone() if q.endswith('abc'): self.assertEqual(r, (123,)) self.assertEqual(r._fields, ('abc',)) else: self.assertEqual(r, (1, 2, 3)) self.assertEqual(r._fields, ('a', 'b', 'c')) info = RowCache.row_factory.cache_info() self.assertEqual(info.maxsize, maxsize) 
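# 6 cache lookups per maxsize setting: 3 rounds times 2 distinct
# queries; only the first round can miss, so with at least 2 cache
# slots the remaining 4 lookups must all be hits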
self.assertEqual(info.hits + info.misses, 6) self.assertEqual(info.hits, 0 if maxsize is not None and maxsize < 2 else 4) def test_memory_leaks(self): ids: set = set() objs: list = [] add_ids = ids.update gc.collect() objs[:] = gc.get_objects() add_ids(id(obj) for obj in objs) self.test_no_close() gc.collect() objs[:] = gc.get_objects() objs[:] = [obj for obj in objs if id(obj) not in ids] self.assertEqual(len(objs), 0) def test_cve_2018_1058(self): # internal queries should use qualified table and operator names, # see https://nvd.nist.gov/vuln/detail/CVE-2018-1058 con = self._connect() cur = con.cursor() execute = cur.execute try: execute("SET client_min_messages TO WARNING") execute("SET TIMEZONE TO 'UTC'") execute("SHOW TIMEZONE") self.assertEqual(cur.fetchone()[0], 'UTC') execute(""" CREATE OR REPLACE FUNCTION public.bad_eq(oid, integer) RETURNS boolean AS $$ BEGIN SET TIMEZONE TO 'CET'; RETURN oideq($1, $2::oid); END $$ LANGUAGE plpgsql """) execute("DROP OPERATOR IF EXISTS public.= (oid, integer)") execute(""" CREATE OPERATOR public.= ( PROCEDURE = public.bad_eq, LEFTARG = oid, RIGHTARG = integer ); """) # the following select changes the time zone as a side effect if # internal query uses unqualified = operator as it did earlier execute("SELECT 1") execute("SHOW TIMEZONE") # make sure time zone has not changed self.assertEqual(cur.fetchone()[0], 'UTC') finally: execute("DROP OPERATOR IF EXISTS public.= (oid, integer)") execute("DROP FUNCTION IF EXISTS public.bad_eq(oid, integer)") cur.close() con.close() if __name__ == '__main__': unittest.main() PyGreSQL-PyGreSQL-166b135/tests/test_dbapi20_copy.py000066400000000000000000000446571450706350600220510ustar00rootroot00000000000000#!/usr/bin/python """Test the modern PyGreSQL interface. Sub-tests for the copy methods. Contributed by Christoph Zwerschke. These tests need a database to test against. 
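The copy_from() and copy_to() cursor methods tested here wrap
PostgreSQL's COPY command.  As a rough usage sketch (the table name and
data are illustrative only):

    cur = con.cursor()
    cur.copy_from('1\tone\n2\ttwo\n', 'mytable')  # load rows from a string
    for line in cur.copy_to(None, 'mytable'):     # stream rows back out
        print(line, end='')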
""" from __future__ import annotations # import unittest from collections.abc import Iterable from contextlib import suppress from typing import ClassVar import pgdb # the module under test from .config import dbhost, dbname, dbpasswd, dbport, dbuser class InputStream: def __init__(self, data): if isinstance(data, str): data = data.encode() self.data = data or b'' self.sizes = [] def __str__(self): data = self.data.decode() return data def __len__(self): return len(self.data) def read(self, size=None): if size is None: output, data = self.data, b'' else: output, data = self.data[:size], self.data[size:] self.data = data self.sizes.append(size) return output class OutputStream: def __init__(self): self.data = b'' self.sizes = [] def __str__(self): data = self.data.decode() return data def __len__(self): return len(self.data) def write(self, data): if isinstance(data, str): data = data.encode() self.data += data self.sizes.append(len(data)) class TestStreams(unittest.TestCase): def test_input(self): stream = InputStream('Hello, Wörld!') self.assertIsInstance(stream.data, bytes) self.assertEqual(stream.data, b'Hello, W\xc3\xb6rld!') self.assertIsInstance(str(stream), str) self.assertEqual(str(stream), 'Hello, Wörld!') self.assertEqual(len(stream), 14) self.assertEqual(stream.read(3), b'Hel') self.assertEqual(stream.read(2), b'lo') self.assertEqual(stream.read(1), b',') self.assertEqual(stream.read(1), b' ') self.assertEqual(stream.read(), b'W\xc3\xb6rld!') self.assertEqual(stream.read(), b'') self.assertEqual(len(stream), 0) self.assertEqual(stream.sizes, [3, 2, 1, 1, None, None]) def test_output(self): stream = OutputStream() self.assertEqual(len(stream), 0) for chunk in 'Hel', 'lo', ',', ' ', 'Wörld!': stream.write(chunk) self.assertIsInstance(stream.data, bytes) self.assertEqual(stream.data, b'Hello, W\xc3\xb6rld!') self.assertIsInstance(str(stream), str) self.assertEqual(str(stream), 'Hello, Wörld!') self.assertEqual(len(stream), 14) self.assertEqual(stream.sizes, [3, 2, 1, 1, 7]) class TestCopy(unittest.TestCase): cls_set_up = False data: ClassVar[list[tuple[int, str]]] = [ (1935, 'Luciano Pavarotti'), (1941, 'Plácido Domingo'), (1946, 'José Carreras')] @staticmethod def connect(): host = f"{dbhost or ''}:{dbport or -1}" return pgdb.connect(database=dbname, host=host, user=dbuser, password=dbpasswd) @classmethod def setUpClass(cls): con = cls.connect() cur = con.cursor() cur.execute("set client_min_messages=warning") cur.execute("drop table if exists copytest cascade") cur.execute("create table copytest (" "id smallint primary key, name varchar(64))") cur.close() con.commit() cur = con.cursor() try: cur.execute("set client_encoding=utf8") cur.execute("select 'Plácido and José'").fetchone() except (pgdb.DataError, pgdb.NotSupportedError): cls.data[1:3] = [ (1941, 'Plaacido Domingo'), (1946, 'Josee Carreras')] cls.can_encode = False cur.close() con.close() cls.cls_set_up = True @classmethod def tearDownClass(cls): con = cls.connect() cur = con.cursor() cur.execute("set client_min_messages=warning") cur.execute("drop table if exists copytest cascade") con.commit() con.close() def setUp(self): self.assertTrue(self.cls_set_up) self.con = self.connect() self.cursor = self.con.cursor() self.cursor.execute("set client_encoding=utf8") def tearDown(self): with suppress(Exception): self.cursor.close() with suppress(Exception): self.con.rollback() with suppress(Exception): self.con.close() can_encode = True @property def data_text(self): return ''.join('{}\t{}\n'.format(*row) for row in self.data) 
@property def data_csv(self): return ''.join('{},{}\n'.format(*row) for row in self.data) def truncate_table(self): self.cursor.execute("truncate table copytest") @property def table_data(self): self.cursor.execute("select * from copytest") return self.cursor.fetchall() def check_table(self): self.assertEqual(self.table_data, self.data) def check_rowcount(self, number=len(data)): # noqa: B008 self.assertEqual(self.cursor.rowcount, number) class TestCopyFrom(TestCopy): """Test the copy_from method.""" def tearDown(self): super().tearDown() self.setUp() self.truncate_table() super().tearDown() def copy_from(self, stream, **options): return self.cursor.copy_from(stream, 'copytest', **options) @property def data_file(self): return InputStream(self.data_text) def test_bad_params(self): call = self.cursor.copy_from call('0\t', 'copytest') call('1\t', 'copytest', format='text', sep='\t', null='', columns=['id', 'name']) self.assertRaises(TypeError, call) self.assertRaises(TypeError, call, None) self.assertRaises(TypeError, call, None, None) self.assertRaises(TypeError, call, '0\t') self.assertRaises(TypeError, call, '0\t', None) self.assertRaises(TypeError, call, '0\t', 42) self.assertRaises(TypeError, call, '0\t', ['copytest']) self.assertRaises(TypeError, call, '0\t', 'copytest', format=42) self.assertRaises(ValueError, call, '0\t', 'copytest', format='bad') self.assertRaises(TypeError, call, '0\t', 'copytest', sep=42) self.assertRaises(ValueError, call, '0\t', 'copytest', sep='bad') self.assertRaises(TypeError, call, '0\t', 'copytest', null=42) self.assertRaises(TypeError, call, '0\t', 'copytest', size='bad') self.assertRaises(TypeError, call, '0\t', 'copytest', columns=42) self.assertRaises( ValueError, call, b'', 'copytest', format='binary', sep=',') def test_input_string(self): ret = self.copy_from('42\tHello, world!') self.assertIs(ret, self.cursor) self.assertEqual(self.table_data, [(42, 'Hello, world!')]) self.check_rowcount(1) def test_input_string_with_schema_name(self): self.cursor.copy_from('42\tHello, world!', 'public.copytest') self.assertEqual(self.table_data, [(42, 'Hello, world!')]) def test_input_string_with_newline(self): self.copy_from('42\tHello, world!\n') self.assertEqual(self.table_data, [(42, 'Hello, world!')]) self.check_rowcount(1) def test_input_string_multiple_rows(self): ret = self.copy_from(self.data_text) self.assertIs(ret, self.cursor) self.check_table() self.check_rowcount() def test_input_bytes(self): self.copy_from(b'42\tHello, world!') self.assertEqual(self.table_data, [(42, 'Hello, world!')]) self.truncate_table() self.copy_from(self.data_text.encode()) self.check_table() def test_input_iterable(self): self.copy_from(self.data_text.splitlines()) self.check_table() self.check_rowcount() def test_input_iterable_invalid(self): self.assertRaises(IOError, self.copy_from, [None]) def test_input_iterable_with_newlines(self): self.copy_from(f'{row}\n' for row in self.data_text.splitlines()) self.check_table() def test_input_iterable_bytes(self): self.copy_from(row.encode() for row in self.data_text.splitlines()) self.check_table() def test_sep(self): stream = ('{}-{}'.format(*row) for row in self.data) self.copy_from(stream, sep='-') self.check_table() def test_null(self): self.copy_from('0\t\\N') self.assertEqual(self.table_data, [(0, None)]) self.assertIsNone(self.table_data[0][1]) self.truncate_table() self.copy_from('1\tNix') self.assertEqual(self.table_data, [(1, 'Nix')]) self.assertIsNotNone(self.table_data[0][1]) self.truncate_table()
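        # (Text-format COPY uses \N as the default NULL marker, as verified
        # above, while an empty field stays an empty string; the null=
        # option swaps in a custom marker.  Hedged sketch: copy_from with
        # null='N/A' would store NULL for a field reading 'N/A' rather
        # than the literal three-character string.)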
self.copy_from('2\tNix', null='Nix') self.assertEqual(self.table_data, [(2, None)]) self.assertIsNone(self.table_data[0][1]) self.truncate_table() self.copy_from('3\t') self.assertEqual(self.table_data, [(3, '')]) self.assertIsNotNone(self.table_data[0][1]) self.truncate_table() self.copy_from('4\t', null='') self.assertEqual(self.table_data, [(4, None)]) self.assertIsNone(self.table_data[0][1]) def test_columns(self): self.copy_from('1', columns='id') self.copy_from('2', columns=['id']) self.copy_from('3\tThree') self.copy_from('4\tFour', columns='id, name') self.copy_from('5\tFive', columns=['id', 'name']) self.assertEqual(self.table_data, [ (1, None), (2, None), (3, 'Three'), (4, 'Four'), (5, 'Five')]) self.check_rowcount(5) self.assertRaises(pgdb.ProgrammingError, self.copy_from, '6\t42', columns=['id', 'age']) self.check_rowcount(-1) def test_csv(self): self.copy_from(self.data_csv, format='csv') self.check_table() def test_csv_with_sep(self): stream = ('{};"{}"\n'.format(*row) for row in self.data) self.copy_from(stream, format='csv', sep=';') self.check_table() self.check_rowcount() def test_binary(self): self.assertRaises( IOError, self.copy_from, b'NOPGCOPY\n', format='binary') self.check_rowcount(-1) def test_binary_with_sep(self): self.assertRaises( ValueError, self.copy_from, '', format='binary', sep='\t') def test_binary_with_unicode(self): self.assertRaises(ValueError, self.copy_from, '', format='binary') def test_query(self): self.assertRaises(ValueError, self.cursor.copy_from, '', "select null") def test_file(self): stream = self.data_file ret = self.copy_from(stream) self.assertIs(ret, self.cursor) self.check_table() self.assertEqual(len(stream), 0) self.assertEqual(stream.sizes, [8192]) self.check_rowcount() def test_size_positive(self): stream = self.data_file size = 7 num_chunks = (len(stream) + size - 1) // size self.copy_from(stream, size=size) self.check_table() self.assertEqual(len(stream), 0) self.assertEqual(stream.sizes, [size] * num_chunks) self.check_rowcount() def test_size_negative(self): stream = self.data_file self.copy_from(stream, size=-1) self.check_table() self.assertEqual(len(stream), 0) self.assertEqual(stream.sizes, [None]) self.check_rowcount() def test_size_invalid(self): self.assertRaises( TypeError, self.copy_from, self.data_file, size='invalid') class TestCopyTo(TestCopy): """Test the copy_to method.""" @classmethod def setUpClass(cls): super().setUpClass() con = cls.connect() cur = con.cursor() cur.execute("set client_encoding=utf8") cur.execute("insert into copytest values (%d, %s)", cls.data) cur.close() con.commit() con.close() def copy_to(self, stream=None, **options): return self.cursor.copy_to(stream, 'copytest', **options) @property def data_file(self): return OutputStream() def test_bad_params(self): call = self.cursor.copy_to call(None, 'copytest') call(None, 'copytest', format='text', sep='\t', null='', columns=['id', 'name']) self.assertRaises(TypeError, call) self.assertRaises(TypeError, call, None) self.assertRaises(TypeError, call, None, 42) self.assertRaises(TypeError, call, None, ['copytest']) self.assertRaises(TypeError, call, 'bad', 'copytest') self.assertRaises(TypeError, call, None, 'copytest', format=42) self.assertRaises(ValueError, call, None, 'copytest', format='bad') self.assertRaises(TypeError, call, None, 'copytest', sep=42) self.assertRaises(ValueError, call, None, 'copytest', sep='bad') self.assertRaises(TypeError, call, None, 'copytest', null=42) self.assertRaises(TypeError, call, None, 'copytest', decode='bad') 
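        # (copy_to()'s return value depends on its first argument, as the
        # generator and file tests below rely on: with stream=None it
        # returns a generator that yields one row per iteration, and with
        # a file-like stream it writes the rows out and returns the cursor
        # itself.  Hedged sketch:
        #
        #     text = ''.join(cur.copy_to(None, 'copytest'))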
self.assertRaises(TypeError, call, None, 'copytest', columns=42) def test_generator(self): ret = self.copy_to() self.assertIsInstance(ret, Iterable) rows = list(ret) self.assertEqual(len(rows), 3) text = ''.join(rows) self.assertIsInstance(text, str) self.assertEqual(text, self.data_text) self.check_rowcount() def test_generator_with_schema_name(self): ret = self.cursor.copy_to(None, 'public.copytest') self.assertEqual(''.join(ret), self.data_text) def test_generator_bytes(self): ret = self.copy_to(decode=False) self.assertIsInstance(ret, Iterable) rows = list(ret) self.assertEqual(len(rows), 3) byte_text = b''.join(rows) self.assertIsInstance(byte_text, bytes) self.assertEqual(byte_text, self.data_text.encode()) def test_rowcount_increment(self): ret = self.copy_to() self.assertIsInstance(ret, Iterable) for n, _row in enumerate(ret): self.check_rowcount(n + 1) def test_decode(self): ret_raw = b''.join(self.copy_to(decode=False)) ret_decoded = ''.join(self.copy_to(decode=True)) self.assertIsInstance(ret_raw, bytes) self.assertIsInstance(ret_decoded, str) self.assertEqual(ret_decoded, ret_raw.decode()) self.check_rowcount() def test_sep(self): ret = list(self.copy_to(sep='-')) self.assertEqual(ret, ['{}-{}\n'.format(*row) for row in self.data]) def test_null(self): data = ['{}\t{}\n'.format(*row) for row in self.data] self.cursor.execute('insert into copytest values(4, null)') try: ret = list(self.copy_to()) self.assertEqual(ret, [*data, '4\t\\N\n']) ret = list(self.copy_to(null='Nix')) self.assertEqual(ret, [*data, '4\tNix\n']) ret = list(self.copy_to(null='')) self.assertEqual(ret, [*data, '4\t\n']) finally: self.cursor.execute('delete from copytest where id=4') def test_columns(self): data_id = ''.join(f'{row[0]}\n' for row in self.data) data_name = ''.join(f'{row[1]}\n' for row in self.data) ret = ''.join(self.copy_to(columns='id')) self.assertEqual(ret, data_id) ret = ''.join(self.copy_to(columns=['id'])) self.assertEqual(ret, data_id) ret = ''.join(self.copy_to(columns='name')) self.assertEqual(ret, data_name) ret = ''.join(self.copy_to(columns=['name'])) self.assertEqual(ret, data_name) ret = ''.join(self.copy_to(columns='id, name')) self.assertEqual(ret, self.data_text) ret = ''.join(self.copy_to(columns=['id', 'name'])) self.assertEqual(ret, self.data_text) self.assertRaises( pgdb.ProgrammingError, self.copy_to, columns=['id', 'age']) def test_csv(self): ret = self.copy_to(format='csv') self.assertIsInstance(ret, Iterable) rows = list(ret) self.assertEqual(len(rows), 3) csv = ''.join(rows) self.assertIsInstance(csv, str) self.assertEqual(csv, self.data_csv) self.check_rowcount(3) def test_csv_with_sep(self): rows = ''.join(self.copy_to(format='csv', sep=';')) self.assertEqual(rows, self.data_csv.replace(',', ';')) def test_binary(self): ret = self.copy_to(format='binary') self.assertIsInstance(ret, Iterable) for row in ret: self.assertTrue(row.startswith(b'PGCOPY\n\377\r\n\0')) break self.check_rowcount(1) def test_binary_with_sep(self): self.assertRaises(ValueError, self.copy_to, format='binary', sep='\t') def test_binary_with_unicode(self): self.assertRaises( ValueError, self.copy_to, format='binary', decode=True) def test_query(self): self.assertRaises( ValueError, self.cursor.copy_to, None, "select name from copytest", columns='noname') ret = self.cursor.copy_to( None, "select name||'!' 
from copytest where id=1941") self.assertIsInstance(ret, Iterable) rows = list(ret) self.assertEqual(len(rows), 1) self.assertIsInstance(rows[0], str) self.assertEqual(rows[0], f'{self.data[1][1]}!\n') self.check_rowcount(1) def test_file(self): stream = self.data_file ret = self.copy_to(stream) self.assertIs(ret, self.cursor) self.assertEqual(str(stream), self.data_text) data = self.data_text.encode() sizes = [len(row) + 1 for row in data.splitlines()] self.assertEqual(stream.sizes, sizes) self.check_rowcount() class TestBinary(TestCopy): """Test the copy_from and copy_to methods with binary data.""" def test_round_trip(self): # fill table from textual data self.cursor.copy_from(self.data_text, 'copytest', format='text') self.check_table() self.check_rowcount() # get data back in binary format ret = self.cursor.copy_to(None, 'copytest', format='binary') self.assertIsInstance(ret, Iterable) data_binary = b''.join(ret) self.assertTrue(data_binary.startswith(b'PGCOPY\n\377\r\n\0')) self.check_rowcount() self.truncate_table() # fill table from binary data self.cursor.copy_from(data_binary, 'copytest', format='binary') self.check_table() self.check_rowcount() if __name__ == '__main__': unittest.main() PyGreSQL-PyGreSQL-166b135/tests/test_tutorial.py000066400000000000000000000144061450706350600214260ustar00rootroot00000000000000#!/usr/bin/python import unittest from typing import Any from pg import DB from pgdb import connect from .config import dbhost, dbname, dbpasswd, dbport, dbuser class TestClassicTutorial(unittest.TestCase): """Test the First Steps Tutorial for the classic interface.""" def setUp(self): """Set up test tables or empty them if they already exist.""" db = DB(dbname, dbhost, dbport, user=dbuser, passwd=dbpasswd) db.query("set datestyle to 'iso'") db.query("set default_with_oids=false") db.query("set standard_conforming_strings=false") db.query("set client_min_messages=warning") db.query("drop table if exists fruits cascade") db.query("create table fruits(id serial primary key, name varchar)") self.db = db def tearDown(self): db = self.db db.query("drop table fruits") db.close() def test_all_steps(self): db = self.db r: Any = db.get_tables() self.assertIsInstance(r, list) self.assertIn('public.fruits', r) r = db.get_attnames('fruits') self.assertIsInstance(r, dict) self.assertEqual(r, {'id': 'int', 'name': 'text'}) r = db.has_table_privilege('fruits', 'insert') self.assertTrue(r) r = db.insert('fruits', name='apple') self.assertIsInstance(r, dict) self.assertEqual(r, {'name': 'apple', 'id': 1}) banana = r = db.insert('fruits', name='banana') self.assertIsInstance(r, dict) self.assertEqual(r, {'name': 'banana', 'id': 2}) more_fruits = 'cherimaya durian eggfruit fig grapefruit'.split() data = list(enumerate(more_fruits, start=3)) n = db.inserttable('fruits', data) self.assertEqual(n, 5) q = db.query('select * from fruits') r = str(q).splitlines() self.assertEqual(r[0], 'id| name ') self.assertEqual(r[1], '--+----------') self.assertEqual(r[2], ' 1|apple ') self.assertEqual(r[8], ' 7|grapefruit') self.assertEqual(r[9], '(7 rows)') q = db.query('select * from fruits') r = q.getresult() self.assertIsInstance(r, list) self.assertIsInstance(r[0], tuple) self.assertEqual(r[0], (1, 'apple')) self.assertEqual(r[6], (7, 'grapefruit')) r = q.dictresult() self.assertIsInstance(r, list) self.assertIsInstance(r[0], dict) self.assertEqual(r[0], {'id': 1, 'name': 'apple'}) self.assertEqual(r[6], {'id': 7, 'name': 'grapefruit'}) rows = r = q.namedresult() self.assertIsInstance(r, list) 
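        # (The classic interface exposes one query result in three shapes,
        # as exercised above: q.getresult() returns a list of tuples,
        # q.dictresult() a list of dicts, and q.namedresult() a list of
        # named tuples -- so rows[3].name below denotes the same value
        # as rows[3][1].)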
self.assertIsInstance(r[0], tuple) self.assertEqual(rows[3].name, 'durian') r = db.update('fruits', banana, name=banana['name'].capitalize()) self.assertIsInstance(r, dict) self.assertEqual(r, {'id': 2, 'name': 'Banana'}) q = db.query('select * from fruits where id between 1 and 3') r = str(q).splitlines() self.assertEqual(r[0], 'id| name ') self.assertEqual(r[1], '--+---------') self.assertEqual(r[2], ' 1|apple ') self.assertEqual(r[3], ' 2|Banana ') self.assertEqual(r[4], ' 3|cherimaya') self.assertEqual(r[5], '(3 rows)') r = db.query('update fruits set name=initcap(name)') self.assertIsInstance(r, str) self.assertEqual(r, '7') r = db.delete('fruits', banana) self.assertIsInstance(r, int) self.assertEqual(r, 1) r = db.delete('fruits', banana) self.assertIsInstance(r, int) self.assertEqual(r, 0) r = db.insert('fruits', banana) self.assertIsInstance(r, dict) self.assertEqual(r, {'id': 2, 'name': 'Banana'}) apple = r = db.get('fruits', 1) self.assertIsInstance(r, dict) self.assertEqual(r, {'name': 'Apple', 'id': 1}) r = db.insert('fruits', apple, id=8) self.assertIsInstance(r, dict) self.assertEqual(r, {'id': 8, 'name': 'Apple'}) r = db.delete('fruits', id=8) self.assertIsInstance(r, int) self.assertEqual(r, 1) class TestDbApi20Tutorial(unittest.TestCase): """Test the First Steps Tutorial for the DB-API 2.0 interface.""" def setUp(self): """Set up test tables or empty them if they already exist.""" host = f"{dbhost or ''}:{dbport or -1}" con = connect(database=dbname, host=host, user=dbuser, password=dbpasswd) cur = con.cursor() cur.execute("set datestyle to 'iso'") cur.execute("set default_with_oids=false") cur.execute("set standard_conforming_strings=false") cur.execute("set client_min_messages=warning") cur.execute("drop table if exists fruits cascade") cur.execute("create table fruits(id serial primary key, name varchar)") cur.close() self.con = con def tearDown(self): con = self.con cur = con.cursor() cur.execute("drop table fruits") cur.close() con.close() def test_all_steps(self): con = self.con cursor = con.cursor() cursor.execute("insert into fruits (name) values ('apple')") cursor.execute("insert into fruits (name) values (%s)", ('banana',)) more_fruits = 'cherimaya durian eggfruit fig grapefruit'.split() parameters = [(name,) for name in more_fruits] cursor.executemany("insert into fruits (name) values (%s)", parameters) con.commit() cursor.execute('select * from fruits where id=1') r: Any = cursor.fetchone() self.assertIsInstance(r, tuple) self.assertEqual(len(r), 2) r = str(r) self.assertEqual(r, "Row(id=1, name='apple')") cursor.execute('select * from fruits') r = cursor.fetchall() self.assertIsInstance(r, list) self.assertEqual(len(r), 7) self.assertEqual(str(r[0]), "Row(id=1, name='apple')") self.assertEqual(str(r[6]), "Row(id=7, name='grapefruit')") cursor.execute('select * from fruits') r = cursor.fetchmany(2) self.assertIsInstance(r, list) self.assertEqual(len(r), 2) self.assertEqual(str(r[0]), "Row(id=1, name='apple')") self.assertEqual(str(r[1]), "Row(id=2, name='banana')") if __name__ == '__main__': unittest.main() PyGreSQL-PyGreSQL-166b135/tox.ini000066400000000000000000000021061450706350600163150ustar00rootroot00000000000000# config file for tox [tox] envlist = py3{7,8,9,10,11,12},ruff,mypy,cformat,docs [testenv:ruff] basepython = python3.11 deps = ruff>=0.0.292 commands = ruff setup.py pg pgdb tests [testenv:mypy] basepython = python3.11 deps = mypy>=1.5.1 commands = mypy pg pgdb tests [testenv:cformat] basepython = python3.11 allowlist_externals = sh commands = sh 
-c "! (clang-format --style=file -n ext/*.c 2>&1 | tee /dev/tty | grep format-violations)" [testenv:docs] basepython = python3.11 deps = sphinx>=7,<8 commands = sphinx-build -b html -nEW docs docs/_build/html [testenv:build] basepython = python3.11 deps = setuptools>=68 wheel>=0.41,<1 build>=1,<2 commands = python -m build -s -n -C strict -C memory-size [testenv:coverage] basepython = python3.11 deps = coverage>=7,<8 commands = coverage run -m unittest discover coverage html [testenv] passenv = PG* PYGRESQL_* deps = setuptools>=68 commands = python setup.py clean --all build_ext --force --inplace --strict --memory-size python -m unittest {posargs:discover}