pax_global_header00006660000000000000000000000064145213141560014514gustar00rootroot0000000000000052 comment=c64c7d1b8caec172c87a73154cc4da90fd3fae3b sqlite-utils-3.35.2/000077500000000000000000000000001452131415600142455ustar00rootroot00000000000000sqlite-utils-3.35.2/.github/000077500000000000000000000000001452131415600156055ustar00rootroot00000000000000sqlite-utils-3.35.2/.github/FUNDING.yml000066400000000000000000000000211452131415600174130ustar00rootroot00000000000000github: [simonw] sqlite-utils-3.35.2/.github/workflows/000077500000000000000000000000001452131415600176425ustar00rootroot00000000000000sqlite-utils-3.35.2/.github/workflows/codeql-analysis.yml000066400000000000000000000042771452131415600234670ustar00rootroot00000000000000name: "CodeQL" on: push: branches: [main] schedule: - cron: '0 4 * * 5' jobs: analyze: name: Analyze runs-on: ubuntu-latest strategy: fail-fast: false matrix: # Override automatic language detection by changing the below list # Supported options are ['csharp', 'cpp', 'go', 'java', 'javascript', 'python'] language: ['python'] # Learn more... # https://docs.github.com/en/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#overriding-automatic-language-detection steps: - name: Checkout repository uses: actions/checkout@v2 with: # We must fetch at least the immediate parents so that if this is # a pull request then we can checkout the head. fetch-depth: 2 # If this run was triggered by a pull request event, then checkout # the head of the pull request instead of the merge commit. - run: git checkout HEAD^2 if: ${{ github.event_name == 'pull_request' }} # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL uses: github/codeql-action/init@v1 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. # By default, queries listed here will override any specified in a config file. # Prefix the list here with "+" to use these queries and those in the config file. # queries: ./path/to/local/query, your-org/your-repo/queries@main # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild uses: github/codeql-action/autobuild@v1 # ℹ️ Command-line programs to run using the OS shell. 
# 📚 https://git.io/JvXDl # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines # and modify them (or add more) to build your code if your project # uses a compiled language #- run: | # make bootstrap # make release - name: Perform CodeQL Analysis uses: github/codeql-action/analyze@v1 sqlite-utils-3.35.2/.github/workflows/documentation-links.yml000066400000000000000000000004511452131415600243540ustar00rootroot00000000000000name: Read the Docs Pull Request Preview on: pull_request_target: types: - opened permissions: pull-requests: write jobs: documentation-links: runs-on: ubuntu-latest steps: - uses: readthedocs/actions/preview@v1 with: project-slug: "sqlite-utils" sqlite-utils-3.35.2/.github/workflows/publish.yml000066400000000000000000000030151452131415600220320ustar00rootroot00000000000000name: Publish Python Package on: release: types: [created] jobs: test: runs-on: ${{ matrix.os }} strategy: matrix: python-version: ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"] os: [ubuntu-latest, windows-latest, macos-latest] steps: - uses: actions/checkout@v3 - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v4 with: python-version: ${{ matrix.python-version }} - uses: actions/cache@v3 name: Configure pip caching with: path: ~/.cache/pip key: ${{ runner.os }}-pip-${{ hashFiles('**/setup.py') }} restore-keys: | ${{ runner.os }}-pip- - name: Install dependencies run: | pip install -e '.[test]' - name: Run tests run: | pytest deploy: runs-on: ubuntu-latest needs: [test] steps: - uses: actions/checkout@v3 - name: Set up Python uses: actions/setup-python@v4 with: python-version: '3.12' - uses: actions/cache@v3 name: Configure pip caching with: path: ~/.cache/pip key: ${{ runner.os }}-publish-pip-${{ hashFiles('**/setup.py') }} restore-keys: | ${{ runner.os }}-publish-pip- - name: Install dependencies run: | pip install setuptools wheel twine - name: Publish env: TWINE_USERNAME: __token__ TWINE_PASSWORD: ${{ secrets.PYPI_TOKEN }} run: | python setup.py sdist bdist_wheel twine upload dist/* sqlite-utils-3.35.2/.github/workflows/spellcheck.yml000066400000000000000000000014171452131415600225050ustar00rootroot00000000000000name: Check spelling in documentation on: [push, pull_request] jobs: spellcheck: runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v2 with: python-version: 3.9 - uses: actions/cache@v2 name: Configure pip caching with: path: ~/.cache/pip key: ${{ runner.os }}-pip-${{ hashFiles('**/setup.py') }} restore-keys: | ${{ runner.os }}-pip- - name: Install dependencies run: | pip install -e '.[docs]' - name: Check spelling run: | codespell docs/*.rst --ignore-words docs/codespell-ignore-words.txt codespell sqlite_utils --ignore-words docs/codespell-ignore-words.txt sqlite-utils-3.35.2/.github/workflows/test-coverage.yml000066400000000000000000000021441452131415600231360ustar00rootroot00000000000000name: Calculate test coverage on: push: branches: - main pull_request: branches: - main jobs: test: runs-on: ubuntu-latest steps: - name: Check out repo uses: actions/checkout@v2 - name: Set up Python uses: actions/setup-python@v2 with: python-version: 3.9 - uses: actions/cache@v2 name: Configure pip caching with: path: ~/.cache/pip key: ${{ runner.os }}-pip-${{ hashFiles('**/setup.py') }} restore-keys: | ${{ runner.os }}-pip- - name: Install SpatiaLite run: sudo apt-get install libsqlite3-mod-spatialite - name: Install Python dependencies run: | python -m pip 
install --upgrade pip python -m pip install -e .[test] python -m pip install pytest-cov - name: Run tests run: |- ls -lah pytest --cov=sqlite_utils --cov-report xml:coverage.xml --cov-report term ls -lah - name: Upload coverage report uses: codecov/codecov-action@v1 with: token: ${{ secrets.CODECOV_TOKEN }} file: coverage.xml sqlite-utils-3.35.2/.github/workflows/test.yml000066400000000000000000000035641452131415600213540ustar00rootroot00000000000000name: Test on: [push, pull_request] env: FORCE_COLOR: 1 jobs: test: runs-on: ${{ matrix.os }} strategy: matrix: python-version: ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"] numpy: [0, 1] os: [ubuntu-latest, macos-latest, windows-latest] steps: - uses: actions/checkout@v3 - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v4 with: python-version: ${{ matrix.python-version }} - uses: actions/cache@v3 name: Configure pip caching with: path: ~/.cache/pip key: ${{ runner.os }}-pip-${{ hashFiles('**/setup.py') }} restore-keys: | ${{ runner.os }}-pip- - name: Install dependencies run: | pip install -e '.[test,mypy,flake8]' - name: Optionally install tui dependencies (not 3.7) if: matrix.python-version != '3.7' run: pip install -e '.[tui]' - name: Optionally install numpy if: matrix.numpy == 1 run: pip install numpy - name: Install SpatiaLite if: matrix.os == 'ubuntu-latest' run: sudo apt-get install libsqlite3-mod-spatialite - name: On macOS with Python 3.10 test with sqlean.py if: matrix.os == 'macos-latest' && matrix.python-version == '3.10' run: pip install sqlean.py sqlite-dump - name: Build extension for --load-extension test if: matrix.os == 'ubuntu-latest' run: |- (cd tests && gcc ext.c -fPIC -shared -o ext.so && ls -lah) - name: Run tests run: | pytest -v - name: run mypy run: mypy sqlite_utils tests - name: run flake8 if Python 3.8 or higher if: matrix.python-version >= 3.8 run: flake8 - name: Check formatting run: black . --check - name: Check if cog needs to be run if: matrix.python-version != '3.7' run: | cog --check README.md docs/*.rst sqlite-utils-3.35.2/.gitignore000066400000000000000000000003341452131415600162350ustar00rootroot00000000000000.venv build *.db __pycache__/ *.py[cod] *$py.class venv .eggs .pytest_cache *.egg-info .DS_Store .mypy_cache .coverage .schema .vscode .hypothesis Pipfile Pipfile.lock pyproject.toml tests/*.dylib tests/*.so tests/*.dll sqlite-utils-3.35.2/.gitpod.yml000066400000000000000000000000441452131415600163320ustar00rootroot00000000000000tasks: - init: pip install '.[test]'sqlite-utils-3.35.2/.readthedocs.yaml000066400000000000000000000002741452131415600174770ustar00rootroot00000000000000version: 2 sphinx: configuration: docs/conf.py build: os: ubuntu-22.04 tools: python: "3.11" python: install: - method: pip path: . extra_requirements: - docs sqlite-utils-3.35.2/Justfile000066400000000000000000000012361452131415600157570ustar00rootroot00000000000000# Run tests and linters @default: test lint # Setup project @init: pipenv run pip install -e '.[test,docs,mypy,flake8]' # Run pytest with supplied options @test *options: pipenv run pytest {{options}} # Run linters: black, flake8, mypy, cog @lint: pipenv run black . 
--check pipenv run flake8 pipenv run mypy sqlite_utils tests pipenv run cog --check README.md docs/*.rst pipenv run codespell docs/*.rst --ignore-words docs/codespell-ignore-words.txt # Rebuild docs with cog @cog: pipenv run cog -r README.md docs/*.rst # Serve live docs on localhost:8000 @docs: cog cd docs && pipenv run make livehtml # Apply Black @black: pipenv run black . sqlite-utils-3.35.2/LICENSE000066400000000000000000000261351452131415600152610ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." 
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. 
Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. sqlite-utils-3.35.2/MANIFEST.in000066400000000000000000000001341452131415600160010ustar00rootroot00000000000000include LICENSE include README.md recursive-include docs *.rst recursive-include tests *.py sqlite-utils-3.35.2/README.md000066400000000000000000000121041452131415600155220ustar00rootroot00000000000000# sqlite-utils [![PyPI](https://img.shields.io/pypi/v/sqlite-utils.svg)](https://pypi.org/project/sqlite-utils/) [![Changelog](https://img.shields.io/github/v/release/simonw/sqlite-utils?include_prereleases&label=changelog)](https://sqlite-utils.datasette.io/en/stable/changelog.html) [![Python 3.x](https://img.shields.io/pypi/pyversions/sqlite-utils.svg?logo=python&logoColor=white)](https://pypi.org/project/sqlite-utils/) [![Tests](https://github.com/simonw/sqlite-utils/workflows/Test/badge.svg)](https://github.com/simonw/sqlite-utils/actions?query=workflow%3ATest) [![Documentation Status](https://readthedocs.org/projects/sqlite-utils/badge/?version=stable)](http://sqlite-utils.datasette.io/en/stable/?badge=stable) [![codecov](https://codecov.io/gh/simonw/sqlite-utils/branch/main/graph/badge.svg)](https://codecov.io/gh/simonw/sqlite-utils) [![License](https://img.shields.io/badge/license-Apache%202.0-blue.svg)](https://github.com/simonw/sqlite-utils/blob/main/LICENSE) [![discord](https://img.shields.io/discord/823971286308356157?label=discord)](https://discord.gg/Ass7bCAMDw) Python CLI utility and library for manipulating SQLite databases. ## Some feature highlights - [Pipe JSON](https://sqlite-utils.datasette.io/en/stable/cli.html#inserting-json-data) (or [CSV or TSV](https://sqlite-utils.datasette.io/en/stable/cli.html#inserting-csv-or-tsv-data)) directly into a new SQLite database file, automatically creating a table with the appropriate schema - [Run in-memory SQL queries](https://sqlite-utils.datasette.io/en/stable/cli.html#querying-data-directly-using-an-in-memory-database), including joins, directly against data in CSV, TSV or JSON files and view the results - [Configure SQLite full-text search](https://sqlite-utils.datasette.io/en/stable/cli.html#configuring-full-text-search) against your database tables and run search queries against them, ordered by relevance - Run [transformations against your tables](https://sqlite-utils.datasette.io/en/stable/cli.html#transforming-tables) to make schema changes that SQLite `ALTER TABLE` does not directly support, such as changing the type of a column - [Extract columns](https://sqlite-utils.datasette.io/en/stable/cli.html#extracting-columns-into-a-separate-table) into separate tables to better normalize your existing data - [Install plugins](https://sqlite-utils.datasette.io/en/stable/plugins.html) to add custom SQL functions and additional features Read more on my blog, in this series of posts on [New features in sqlite-utils](https://simonwillison.net/series/sqlite-utils-features/) and other [entries tagged sqliteutils](https://simonwillison.net/tags/sqliteutils/). 
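As a quick illustration of the full-text search support highlighted above, here is a minimal sketch using the Python library (the CLI equivalents are the `sqlite-utils enable-fts` and `sqlite-utils search` commands). It assumes a small demo `dogs` table like the one created in the library example further down; the file name and records are just placeholders.

```python
import sqlite_utils

db = sqlite_utils.Database("demo_database.db")

# Demo data (hypothetical) - any existing table with a text column works
db["dogs"].insert_all([
    {"id": 1, "age": 4, "name": "Cleo"},
    {"id": 2, "age": 2, "name": "Pancakes"},
], pk="id", replace=True)

# Configure SQLite full-text search against the "name" column
db["dogs"].enable_fts(["name"])

# Run a search query; results are ordered by relevance
for row in db["dogs"].search("cleo"):
    print(row)
```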
## Installation pip install sqlite-utils Or if you use [Homebrew](https://brew.sh/) for macOS: brew install sqlite-utils ## Using as a CLI tool Now you can do things with the CLI utility like this: $ sqlite-utils memory dogs.csv "select * from t" [{"id": 1, "age": 4, "name": "Cleo"}, {"id": 2, "age": 2, "name": "Pancakes"}] $ sqlite-utils insert dogs.db dogs dogs.csv --csv [####################################] 100% $ sqlite-utils tables dogs.db --counts [{"table": "dogs", "count": 2}] $ sqlite-utils dogs.db "select id, name from dogs" [{"id": 1, "name": "Cleo"}, {"id": 2, "name": "Pancakes"}] $ sqlite-utils dogs.db "select * from dogs" --csv id,age,name 1,4,Cleo 2,2,Pancakes $ sqlite-utils dogs.db "select * from dogs" --table id age name ---- ----- -------- 1 4 Cleo 2 2 Pancakes You can import JSON data into a new database table like this: $ curl https://api.github.com/repos/simonw/sqlite-utils/releases \ | sqlite-utils insert releases.db releases - --pk id Or for data in a CSV file: $ sqlite-utils insert dogs.db dogs dogs.csv --csv `sqlite-utils memory` lets you import CSV or JSON data into an in-memory database and run SQL queries against it in a single command: $ cat dogs.csv | sqlite-utils memory - "select name, age from stdin" See the [full CLI documentation](https://sqlite-utils.datasette.io/en/stable/cli.html) for comprehensive coverage of many more commands. ## Using as a library You can also `import sqlite_utils` and use it as a Python library like this: ```python import sqlite_utils db = sqlite_utils.Database("demo_database.db") # This line creates a "dogs" table if one does not already exist: db["dogs"].insert_all([ {"id": 1, "age": 4, "name": "Cleo"}, {"id": 2, "age": 2, "name": "Pancakes"} ], pk="id") ``` Check out the [full library documentation](https://sqlite-utils.datasette.io/en/stable/python-api.html) for everything else you can do with the Python library. ## Related projects * [Datasette](https://datasette.io/): A tool for exploring and publishing data * [csvs-to-sqlite](https://github.com/simonw/csvs-to-sqlite): Convert CSV files into a SQLite database * [db-to-sqlite](https://github.com/simonw/db-to-sqlite): CLI tool for exporting a MySQL or PostgreSQL database as a SQLite file * [dogsheep](https://dogsheep.github.io/): A family of tools for personal analytics, built on top of `sqlite-utils` sqlite-utils-3.35.2/codecov.yml000066400000000000000000000002021452131415600164040ustar00rootroot00000000000000coverage: status: project: default: informational: true patch: default: informational: true sqlite-utils-3.35.2/docs/000077500000000000000000000000001452131415600151755ustar00rootroot00000000000000sqlite-utils-3.35.2/docs/.gitignore000066400000000000000000000000071452131415600171620ustar00rootroot00000000000000_build sqlite-utils-3.35.2/docs/Makefile000066400000000000000000000013221452131415600166330ustar00rootroot00000000000000# Minimal makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build SPHINXPROJ = sqlite-utils SOURCEDIR = . BUILDDIR = _build # Put it first so that "make" without argument is like "make help". help: @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) .PHONY: help Makefile # Catch-all target: route all unknown targets to Sphinx using the new # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 
%: Makefile
	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)

livehtml:
	sphinx-autobuild -a -b html "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) --watch ../sqlite_utils
sqlite-utils-3.35.2/docs/_static/000077500000000000000000000000001452131415600166235ustar00rootroot00000000000000sqlite-utils-3.35.2/docs/_static/img/000077500000000000000000000000001452131415600173775ustar00rootroot00000000000000sqlite-utils-3.35.2/docs/_static/img/tui.png000066400000000000000000007220611452131415600207160ustar00rootroot00000000000000[binary PNG image data: docs/_static/img/tui.png]
Ϲ\|FշBWx[FO.ers0NE:tH4L$L+b80 =:jz|Y}W)MHM B L4U-֓EF45<#R;\p I|DT'M30%hY: MN*٠:ݫ/>% N9K4Bf@D;mtO=ɾp7Ƙ Nª焫5)_J:du%dRr=, tG _J0e{+i͉}">aOoa W=`Eᴘ$GaVazf+mpJV ³;9+'LJUPyA)?6H`l9i < YsȝMQm* /ps踀ƅ$5D0ba˩UpҡP[9Mmݫ 9)/Ĕ.ߪ[g1K(dZDhPh4ϊ( 4 ejRi1tun4Q"pieI) fpC\4]`b`o鎦siLXH y+r\t73q!AhCdʁbikiM 0W* yOML*(XM1>NTt"T(2rsM\Ȣp%.֦>8! ,I]tmoq^Op.V t;- aRlMIL0>yn 6'uABq5m2 $/SdûkO &8VR@DY8%8n-x$$H$n DQpN0T M$(z#u5݁NI*D5X\cL,:+ ȣ#'Ky t:ػ .9,&LW^rԉ/t y`BjB|L , hm YDW/69h@9Z[ķ>V&շAnAZ˂Є}$k22lLd| /2'(qu !m^}QL[Dݭ.feԏ%U'$\s~2A€8" ,)Q]AB1*q?3Sbd5.An:ar(e=VJ[ֹ=`90wNuw ;_6t{ݭ#A[Փ2N<2aq˟&& WgcX; ]|B?,V*1G+pKH~ˌ)z?)+zPAPLjVaKCR1; \ xl{ZvӘ@ٮctB85J-{ y„b03qɝM'dLm4(Pf:E.:4~/ OW$pJI£rLc0c90IE.\)UOteQh+}$HLјgy&"Pg3ORȷ6xmHuZl?889m0`r[޼m@- rzP r)UyXX $`$˷YRT-v<`-Ylv `TNs1Ys6Cr2XԙTNx^&]hf KoLѰƘ]fLMU8$w 0 ѥ$22pIAAL˫8I m[l  2Lϕ'*y e\c,j$`s&d!q S*r:M0V8Gn#p2tϒNx!XO/``m|+`JC埣j Cܘ<*cZY*EExj,l1`㲶C'.;S~\ 0*[R h51aGCq&9Gh2B=Ly  sZ ^r%E㓌+P4ӘO :KdmN<%A8Cqb$:50T*.0T`޼"zg&`7/]{u؇g%{l42暴NEQwf=$! k+ԙP69wx!ZMaŽp#0TFo'O8GU(Z^QLYYXorPYo_KV-ڄ`ܫ [b.z{Uw[lFޘ3\+,m[#"\a=#] BהcԤ;;fN#ECϹ'NtrC9EŽ+j > *" hl?2P]3Ѐ-#N 3!y^:|#dbӑ^dyVj98b#$:dP+O;:8V^ε#leO0x4LOCSØ@dh H8G+,.7qeN9Zu :CU1޽u֚-f؋,LȗD; 0:)`aa &(%`cOO (hm:.3HT~s\` J f 30D rvn*E[P(cYeλL5##_,+&`ݱh| n?V9Dyց 1!^g` ߺ CR1f AێI ;h/½rC&b~" \a&:zY#G3Xs]HFk iv0L(fHzj x͏XnjoJ@RJ288 htnP6m/'ȁAC6)b34UjyL#N)&M^ƒ/ t;*6l8Z9٪9-.Xpl0r@yx yX~@i 1yVq&9ػ 4.C;==\H\ZH Vp΂sj .:P3v8)nX (n1\@-kM>>츎y6}1GoBS) :8Ƥ$i)@I0Vt.鳥jEU{IualƚOB2h(Sr9d]#NQuצc (,"|k*I+'KJՓM~{&87W` FNT(Ѻ,3a\e-~B 5BR / c%I!X͎ͮ(w-kiNnUL[^ ´3&c9ٌ. fs-.Thqԥ 6~|# %B9GR7zNYzM sB1")oFw(ܒͨ"!=p=>t c9Pi]~Օu;S_h/01ГhuY=!XRJ/s.mAqgPMJ|z2elMW`3yEZm0-qZ! X *tC8%х8ҙ]ZQ-ZCɹPYRy39Q,ѩ_t gtԾ q,é Fdc"nCK)M|[ ȼg,-Ӝ:aKJ1%scKVI{#%z3&`º r*iSTBNaV1JB-{I~&;gO֘q@CB#E2a@Fsё%4(bt Id\ZGob <]iA Zc,ѕ}NQStj"ީ2I HuѢN1H&ӢaW8-WlIVx‚ʮ|F*p蜙hSk "0 3F+J<9ަx"# 3'.#Rv$8Ma/X!o"f)pJ*f ϬOI>=os0ar_D=!Y 4R d:-\S!otVC@ Z!xʒpN@g$]Iud  A{UxøNW= T.Жi|X&׌(! pv[Qr%p8>dz#zac#(uUg80% i)fX; k!FۑpTeސд`ɗ@d`qfWY,h9jXN% CB1&uljxXZoLICG\hOQ}MҸP6J̺=!z1| &fmcmZX+biD( |f-7o/:BZ~q*(IJact50,!"8ˠ G=TFCKx%2ޫFӺGk QY N>qJc@ 3˳bLk`ӫ:ݢ( c=0)ԯ8PSGBpXL n)4В0twUC)"  ;)\UV^K1}qRdk4/2 ; zXgn7ۿG/A;\VT f—[bB+MӲ{-!idy?gj6#X9_C ^:j79PW@^s0׵W wk&كֽ<lSR-QL 6D٬  儞0T0eR6*,DXtRMӎ8>*͌ g~JFqwt٤6G*vw@`xLX %+"vakm Xh'JsL`O,Ah'`„$ %po)GT6rGanf+Ǣ Ƀ.xZ0b7E7V~g9{C~E֠`}KłTzʓ)cYZ_^a`R}gIY6,UlZCDQ?USKj`dIQ@$ +z lk ) B9 6bRg 9̨:ԛ=0il'4`;oFD]|dI$C|<@a(D*6S% IQx0z' 5l$=%хZ8Z]x3hk;8Z7} & :<-@W>=Xxv|AHh.w%46e'zj@ UYsBWR S[H̏]Hd6mf;O2ÁcDe8!C=El(FX R'pس< ɋ) g= ;ʆ},i+-i =8h^jce1+Bk))zra+3'r~{8Zh4'8Z 2,$<XVE>W K]CP%@@ס<:vޱe%90ʁr]!aN;kJ ?Oh~-(Z~M/*8Bdn}&ƊpPCu4=d9tnɇiUfl<},4*J65gㇰ*ص!HJS&;$R$'V{sJOы(h\` ,7"p{2X8G9<Z霣!0<!Gk刣yR-˯ZXT~!oO__t_y\v x?GHLL= U:Z4=A9Z=bmiG qJ12rciY/r"oǢ渋厏|A1e/jlōTȎ)QHg4ӟ ^JzCcE%BbN: 4^"HԽ}h$h(VN*2os/Xf#JQCzj +C'WK;F:ۢ*âBØp>݊(&7n0a Gk\h]G+J k~eO?=5PYx~?m7NJw_>s8 ԣN)#ҵ vvxWL#wݨ,@XP5">\Z/>y\ÿQjUS"p=Q6PXK 0F-Մ&P:Qf2 C1 0MAX% O-my_xH*>c(Aatrzp) e`x)Iпlr0&(ڦ+8+,<R%Ht aɵ!LLz-7ctmBh-˃~7hÛr;/_/|;/~oW9H &8[/#MsBK?$5]3g"33+XL1o#\Tf;l'&7UueUBpaJ[(M_u tlWcr3s۸~tr1}|5N"{R'jccvN)̈́CSc  hQUs0d(9 +TVTxw='TR6!ι&IN6fa 9Z_$5o_RrW2/1 鸻OOX5h߷`VQOT6Qmt-D?E<.". 
ZR+x؆#=b!FƢU9p䳼hL3>iSG1L )$q=V0Si:0)8'pLS`j18t.fΠ&}N&Oޔ3!deHee&DBq-JϺ(TSLW6-UXXJ^9aBBiLXoNØ@}B JB„Cة ےsN1n^&;h5Lx7}!EOxqhh|a/?ipMԅCVv hHԱZ bPOh9e$sqI`/+yґE@tOş-~ePUEpu Q^X-~̣u& c1a_E$$PТ&Z@PY IPN55XP^:.X%-&y=) H`’ֆ &;2\-k=PxZfRcGXx<(vD&nX10Er9ZGar7\9Zp Coz2F*m!>EF>6;qJ j4` eT{q2B(I, KC-S @,;~B%74VL-񊽚/_{1I!cL92X=0 m3ќ +,QIP 0T(Ni K7)4Ut 4ŕ6dZrI)rl =F`UchzOخg,a[iʘ>|V[Y`¤~nTLIELHl qpⲇ1& agE}Ąhyzߏ8ZEn,]Ѫ1IGӢ8Lm;;83$0B'"aS D+5ܰ ŨТ= 1:5.0򜖌7_4I=.wL 4/!e|%",*LcT̍ "Ь>LPL(pI2eUM`pJUP=b nF"Ujs$~*pctDBV5~bJh~|ʢ4W* ޹zRQjDRObӨbZŔ}? kF+^o FCD5ad7MZ ͍2^]ȸ*E\Q?Adp(e߃=|q={zd1WEXIH "⠖8`;sӼc-Қ`@.X-6T|ʾ& 9)HpAoL,dBrYLY->Aa#x R1&Rt.f$9a@$=AFzU(4ρ™tjEeyhc0"b,qy}2OBZnN)&wIU>(Ĉ9[P*,( JT U@l1cOxƄk 1&(CZ( BKמn|#Ɋ;\`#8] $b9hڋ=.| Z5s+rl;Ä&.rIBH]-O+ۄ ~_(K83 3py`^Tv9„V$ (qhAP#2ζF8d5!&$H8Ƅ8ՙ0k ct2Tj*^y-T~+(RuN@Qv;gIJğc7&>A@tx\K=t2$\lT#DQ".#V 9g*5s^x[¶lb1V`>NRV]$[cF֎T9R%sMYs7!ABπ}T0D0M*AkK&bPLqT `nrj= hunɲcB3ĄգQA] zlKs-;Y"|L `Z@?' l .puY Z)M4u|]+!Y2Hrb2\mAhw"^cX/?G_ W~"(9 ¥9ـ';~?(?{m&YRyhpbmYwϪ5hDu:+iP8XC(K#Ød~ );R5Uח\WtQ4{꾀AX_p܅j4ZEd8Pf2/ (jÅCL`Z+"?C. f-iuA))'J4)%A# ޗ9t ;^v*&lmrRBm h ų={-,^)hWC *t∨kTC^c ?,m!-/f`-П^\,!J0sa}|a@:..$8(Z _5;yُ??x4ݩ@P:{@t\*! vPLRBXQ@rӳ(AR`} hQE&D JVYX1( BRNP#G3tDcO)+*ؑohbkJ@:Pz'7c=aC@fGF5X]JAvo/߸c_}/^1Ԁ[tCѹ8O!5o?G|[ ΉRtVfז4 dS$J`4`We!VӎI5WdWwνQ$!ğetYUpQ|5I1o?}ʨ~ oU(=k^ΕHE@QHbfLP_攢`t/u)44"Z{4uFf#Ԧ2+֒.?,U_{Bݸ2UXOcB%X@I*4*85 LǺ*(AnԺq"'"'eZOqWj@:S#erH }Uȍ eG\e(WcVt zɁ>i"#9k͘|2Z-$(BDsXe_ͨKؼ U`Zx%4Zbˏ8ZB *\KES^fBJ޽0".+qjS^.r%T50x K]Wϟ/0*7*w|ryG!]\\\^6k<= ' ޼y" 7_mz܌LSM";;R4^(@p@tu,ZFQDn& qj4c(8ϐϦADƉ͆(Xpp{2150ₘ pyz1 >7 ъ_Zh^c8D0Цk3"P!á܅FUHn ?QMo̬ @::l] \8+Eg8C9ELrZ^2X10 "#sQ`WT")`j̓HX\n/Y~xRnK.\BCj}qKNMAw'X s >&U.e-@o,lڵ&5ݘCiĤ ^yEc[(RƖgfӝ|S0gގ^x89$lb3{/@B]Q`8N|.NĘT%p:?/ (8=Ct!RP$_HLMC B IF8;>8?k71-`Qؕ[_Qz^n}`†80?>5;~GAJf'ʔ,jP8e0(xĸeM?.E(I_e~ u 4-!Aol*q%:beA) +bD9&h8^5=8 +a_}'ȃGc+=!]o˕z=Z[w8_{f;,E,q:P'E$qbG7!RE䍃|p"aeq&PS\cXܐUG.]]sם rUtO _U R!z{0`k?NOHfaFInCc44$ ǻx86GX=yW,+zº~k6[^5ٛźkm'EMoظ^(Iբ#ZpDo^8P`Af>J{^_ iϨ_<'Hlѣf<CUBߨP i51 s.X+ q3,@s9 EV`?Ns=gI\5LЍ  3f5~>O,g5'KbLlKFcp\p(\]e켟C0*F`um3+'.qU` bu&0p ҐD#[(vB*`Cv-,ApxX 9MSsR #s~ں70TNJcBIJ Sg4ڣ|ύ{{w]`|A[1̧vl0۲Qyp0HOH8z]O^潘eaB wowjPp]^ ӡ@F -K6p( \6&Pƶ,+ XWY*•*%^X,'LqO-'=OY7:&%&I)u3cL<]?+d&B;3_ó۝|^_ ZOxDl^0{&6;m8tvRQ?-s15Y$\ Z*BaPenxVt,v\8oK n#WYՊJ[>O0AZ„ షfS+ ی&fG `>i=!0!Sc*`PwإQ➯NaB&L Gr}Ħ'e%iƝ@!rP@'H{qkY͉QT}Bb9+ hBpKSM0|AωfR22ktNNT{éEۋ `r0C(=aaVпÙ-PN]P +Ll`'G'?,|t|Dm&;03QlipD-{G*2e-{FF,(f"i 4wV agK|az]Õ%-Fp+jxyA**ܰ; "J](? aB ̬}wyOp'h Ncjc0mZ ݌ޒqzp,rl,4h~M7 yN)X]b糁I:zbX`3iZEp6>@| &]=IJ98kิJ/aޔ ]:ĔqAWABmRY|bs}iOcڄ_?=ۺOvg =mJJvwg~>fM'lh|Zpa,/ VĶ&!x}#N4( &4@ cl^9AG.i zzEw9[ 3(=…%! ໟc|1ېPzu%u/?1T\Y K˒ rASG-w uOA '&ĭ 3 v 6]k/9ZgJ!e,>@]Iqbkx̘iY]qф%08"C-b.0;nuw@HBD&HzNV1A^-(xkI@$ TZR:&P_X+(y߼w]O@;/|{Ƞd|;/l' #1rp1Dd*$ю` YZ8=s׸{GSE ,bLXdq#:͐A /ǓV<4m BE U'8;\S6 uEdެ/ lb{A;s\N}M\-"lkA`&uվ:&e*lob5k_9;ߙ1U5P1iD #)*@{vKO㊢kϵhmdٴQ #~ve/-XX;?Z..E bF~Kz Ae5$)BJt$ITe–%- EKs] lv^BzKV (H-wBo$1.E'lBʨXX~]`$C`eȴ eǖ̢+ ]ȭϊ#m!p`S,*% <0"9-M,އ.\ʱ> >y:J9ԻjBsheRR(8 3\M%(Û-D _vλvOƄJWG<Ȋ}q.\$&:&b+ gyHAJ;MJJUDvJː`k=׬:Z&>qmfJLq*;,KYJ:"$HHHE`H4NqZUn(1g9 b X ;9Ryg[oEӎ$,l,srH8$we<}D~p rSjyÿJ#! gݓ1R(Gesx~ tF3pez?#ޱ!V( Qap&)8Z% ]wS?DŽ7.|}5L $a<^& H^XjT**FԝŨPUוj'͝Z(׆ >/as @+9+a[E>~;EX#{X)%@諰~[NWܠwnV'(^+.;"GpscJnC׫?5+?&|O-k/d74,0U{ T6VhpVJ^$B> .z.iױg".li Ce pZݶx뛋zwyMw0Q6 q4Fkњ5C QjܤzE3o-g\OP/8pbU@A~-Ê`@GL#1ء|!+ֲnPS9C\>ʄ=`/_=|;O~?? 
l+4 غ{tN$6ןg8͓Vq 赭9 ;߯}D;>5{p8Vws3nS# };/4cߨ:F4J.&GL<*eWFko&0f]v+4[竳%bgj`E..I+, Ѥ )@=*s|1ڗI`ɰf]~Z/a3POoB4wmO4Z7>[`sՋ s,sГr'BJFIs/~4;!'P8Vr.&8Ce]Qi.'#ѤtK\Um3x#`w-Z>qH:>C4 t[N9O9Vjh'\ \ִz&Eft o?7% !D=r3ANM?N~'=N08aÆѥ2 VD !v5Z3\kB gVŮأ*qz*Q(ʪҚxbv+'9V9C3УEchIJJ M l2esC"#_駟ï~'|F;d~"'QSe='/UChz]yDU:Wz rO}IJWo%^W3xb,B6^':߉@S[HE ZA(YPJ׹Z94,qU)b Z`Cޅ㯒CT%.I$̇'NWyXl؇ ]л7os;q|"4O,-Ca|肧m{P_ ppBqr]^!F#^AL4Zi:hMG4Z^!chJ@1bViy<\ٵc7-V$|'ةg`{ uX\΄jtp5j wjɖ;V oC3MWĞ~Ad,I:O)GRX422օ"w<#7 y&~1 e<3 uEhRJK[Bo(0 K& F>>6d;h'I61FM)_;L&f 3"3?ӳ '$y ybH'x*Z/ƱHIJ_l{`PzN8 j"hBI77E:o8wzœ=?! 0O{pyz FܷRVU3qBժ AsBS0u +cE~S.l /*V1H M.bE'k-8Wp뛹j?ߝHR`x&3A<|ǤAL˔ S") fy˜ ĉJϗfb-; KI6N0R&9!v {8af#87(Lᤗy`|0qٽ3 M:8+8ˉR:Pm l|}baF.6R SS4ZoLu4'T[rVH?ygp(;xD*ְrڒ,+'hFr#58̂КKP1R&EsR`c9PZOL,-'+t],Z2a+$ct^OiYb7A )j-Xr˝K4lff&̄ 'Y>Rt{v sm7JgOp!!!ϥG #Fa0) XP ܑU qS13]>^5U%v%9OA }$)MֳF맯њy;!'x֛=1G_=|Zgz 9a48jƩO0'y}F%R 6ڠ!7`Ӝ&Z.5sRʴa PBm"5O4ugn ZqHDV \OJea 34BSK3yOX%9ޓIKks''1;!0&8.ɚuO[yz\颃RAUF؎-,R;q2qeHwb#}*EĨji7]֟GѺVNd=ʝ-zYi.k)lF˕$xZMV988pp+[!QǕ?*},3ejߢ`P8іnqB5)ѨPxa-ۭ%Ca%̴݁s(D`6Ib  , fQ!-R, IF0N #u&#HD@s (GXV>#:"aNXue`k!x znE5]~ނA ?9du4Z':RMu5ZOjN.6Brow+Uzi5>(I˒>R$: p OsdA52q4B*` ͮy]pg]yQnה䄙oX+'$Oh2p:wDM@Zx_[ǐHO9xX'I3 6h%Dj' 19N`Uozn i>Ș0Z!@e="ZE&jtgQv:(:6X/Ѝ`rBߡJYvܺ8@NQͽ B]4ZkGhtתzPd^SuBd Xq2U}]w;앫H4K :nMΘ‰퉕V~z p+nTy U||%ٔ; 9qe5_&j',0-Nph#ME a|REw^#b!:=KX `G=,Ms1-%h"RھC69!j 7f'u58"g^%uANӰr.PGZz8v lw?jR;儋rI ͳ~/^P=q%=Sjzid.įa$:; ͱ|( Y<$3vz~G! k_z$ƇEݑsQZ6"p,k4(EtĞ/#yη1aZ&҈1.b# 7_NG7 6֜,,pH i#i) XQ*M$Q3=ƞZ&q6 kdnv0 tp7)P⥉f햦ѐ{.Y!qA9AI`#t6N #n\pMNO3F|;dEDr ^Ucɪŵ1I a]q]G.Q"pL t>pHvI $`?`S 04l: 89KLSjf''laLdX'6v;F+8,Z&iP gu9u3mLI!NיA0˜&dyS܁&X[)"1݆Ba[Ik&.F\Oɠf _`# " ɳFc͊\XO[Jءz5 սBs-Cwkװ.iԑwcIhem,w zYU87? [Svu)@A4?5:raQH-1Jv@5 IBSOZrOR؂ev5U `=SrK<9m]N(ϟ>xx~~(;V9Nߺ&. Yky+bFÿ́.˓f1im r[#HJ>m,*2B Gns@Jб YG6EEyE{hb%[3Nr''6Nl4;` lfId8D8[Zjuz B >J8" cU3ah•Nc&}z 1n+!9%[]kHyյʴdSN *~DaA)`SFkKf.8?̧ܘOkr,CYZ hrLOSVΧˈw&ԥp|`j7Nv:eQ~,u7ݙZ#C6 z/UKNB>EZ0F? rUuw{?G)!JҒ+8` )ar90eL>i$'!'4_F_nG(lGEې5HA}i9$ݴU='ZdksP!muC{.3wW/__s77嫷:"0#8y.3^j ^nVGT^V0,Dx:쩵8_T:Q$\ŊNf{.ASh/wX🶚_ɛQ)rNqJukC7@/?ݿy}\S>___>|zq|3ߐR\ګ|xÇrwRBN*0@sF c(B Q5ؐEǗ;I +h֌mSptcgZ Ll.uPN0;alJKnܝMS z*Lfx )<ϳ,A2^2T=E@=Fv_6rBEpN4~TV_?>s̚.%?{goэ\U1ǷWUp-viH B3M LpfU 8Nӥ4,d…P)'W &0La$)0)o IuG?<@0^; X4Ȝp1lXq x/_ n41JJ%x$oG *R;A) MVE2pR3%* A~%Y >5Y;s 츝@Àk.[9aK/kᄭgD>0<<1Ak, uX_AIa~'_xooC+ߨxM\H?RQbf-v2€5HdJ[% l1wRU8aE/$$ ᕸ I#]hbҕ22Ì q vBg,!ڋ;*P7vg8{%~"tO='$gOw=-.;?>xΛ_.5|0#-f BH1 g7"X iS%Pf<gфVcbHs2 aB[2pZ0 фY!.6 6/D|: CZwցڬa# 'FzVToNxWKcӛGwD2g*x\_yb\\Cut&)&1c; *^^IfK:;!V^/=ݗl'<N?Î7=t;~i}"-g*I$]:rI DpSBJJ[CZ3PB_&,FuE3s'/&Ⱦ - #Lh>0a(# S>:'w" C3GDgvtl0ߞ?|YUX?kjNbMKGv;_ˡ3J@S3;`QQj2r*34rhT@aFw-J-]/N;gFrM ?@Jގ Шp6.xEGSEͳ^͏ 8ˁ)+)- 7ET(2QEMJ")A0P3S%4s ]N0iR5N(<ǖ38".Nn3Ε&s9B, r'?UY}tuEP'Dq$v+a2!'dsSpü*6 2 uէ✅ՃDKBbTK`UTThf8W2p&F*N q45^u@k-*C'-2NH ۧ_?̓OvYr 9,7 gH8k$y!@-@pc-E,L"u7=F$%K>+FCvp: zCDdLsYz}Q!ѧOvNxwGgsrKv AD<؈"mأg8Q8 m!_"H5)![B Q1I=1h$NjFhˤ˘8A:ރ iMnkL['n9n0?mRCQ6a(b BvZy,S |! 
/__hoNPR0N]iD K cxase֛uYPXbS4~\xyپd}.R]Pp8 xlT#km-kŠ|cq&t \ 8>'Ĺ ޠ29!8ânj%9X Awǭ)ACkN PAZF\49t/69a :QtU@h'?'eOCzE!̚4\"c_W)%|*Ͱ9p25cSc)NUi(TRFh:iEls?ل,+֧ }N8s*U6>w=l8{\N?U"A).PRx a#C?GᠴZfB{ ~6@cXA8BƓL"o0(-ghJH$H& %Rqvsx3@ :c tf}`v8!S?E3k>8p_ KŐ2'ʱzK ;.%qK$|%)2珞B[ 1R>Wq")'{+K ˋr \s@ˠsTӂdQ)AC=[T}qHz rB^{-[`L "ejc ' ;`!()W%R$޿/ݳwS$g5rίu3_6K5g Lͫ&Fj2x"іЏ-JV@!v2khף@>b`@ Q N,L!p DmRB_|c K>>%jF_1KW C%_i'xNμ =Q2w^}wry=}Oe%cY#Ȱ㎛ܻdxEE:(jݖ6sدJJoR L#'&}EH [Wmf9uNΰ\^JA Y?ybaq% @L.y8]GIN3ɚ^r٬KT?+.sO"n |qRla+!h0&@fH$ˊV.۠ W(#D$q\㋓b*ELB4QI8]XH!vI: ˰CP0+\I^4X8 {cNtÜ`cަUݦzR޳5;O$9LiVj:z,XPEdbNS scS AtZ&Bl#yeZ&6g J 4HAk/=snyҰqBzAtP+{Ft %`<F\B?Gi7wt%?Vvc/,G)5M9xDBԭ,Fkuk@po;rN0S@CQo{tb>Ԇ >ɰ6*:6UBQo7UQ)-P$pZFTP VwR$&,G|KR(6v ؚRY?DvMd"YFvzFk_?A6uv ʽg9+huE:9 !JL*jDgDH:vvV-/hKn{,p $dz)`$13PBebyB '#| Bt X]Y0`uѴt< ;Fk h/ &( mN0ZPh DP-Y91Vw=.f׋ZD7xlت UIw2G`V' q\-qIМŪӚB `a x qW?w֫_ YHX`Ta+șRcvi0P΢qHlRortp[~ȀmRZmXA O%4EXL<*1☣C1E}H:yH[΃?V))VT7\z >*j.AV2)Cm;a[.R O ס$MPf Vw}0's%Yx>Z#w"PWygT"J-=,8`)M5J8.O` N~iJ_s>^ ~R]z"yF8c49aSZ6lnG! ୄD;}C1A8е_8]=.3jЗo}fӎb-8u;RB[}JII!~$2dD!AY!rIGq/?Ac_\zcGNAm>3a''!?o'8Tu>q 90٧X{Y:>RWNk 7w_{dVFR1EeX#QAR+0'1z39U&Ł,l|0mG {p siU 0NX$7OPc݃򸙑Bb@Io$!7Cݦu y`= W0F*;A"̽%I4q9$ 5 qU)cC1v8!"f) y'sAS{iFcF(pj'؜6'"AX.VR%vfz6>٠$+mQj*%JHM%͵Lz0m''[}LfD/;,ϏbJ~ p<0;lXo_ښTMm J)5 31:Ѳ0.F@ a_ 81@Qߡ3B ΉiBH$FK?k{$t)F98uӀ`0=mRlmV)X-{b; ;wxUKAƋݭЃ.w("]`' F x"5Z 3IJм 7A"&]RPBeu o4E8!dBx?0,j/ŨjZ0*s:Hl$8A%^޴NXk((OX@':u!2E_p6h'&i7@a` @+tٗ p|Lj+6>h2hA!15  ,6Ȑ$H )h69+rr§0E1CA`#PXf(A2 ]2;a Ϭj0PWWW [65YS5vhErB^hQ'p)A,m]pa@RB^" ,(t2మbF^ ̦I,B@R k.`\qlS( i_v**~qBFR> ?EBL_2Syo؃"HDu3t.M\HG"GQ-ymvHl``bdhB,r +Pu/xN piNqPF뺥˫Vۛ~Wqk\?֋cxf_z7b%FZ f2BĖ4t*B%"fBEm풹ӪtRTUTE`%έ&QNP;n րT:x%D ix`"=}~5ľ7Q*[ 4NH6'7<fqۅW0]qcXQ鐡m] #c* T &a<aA!^$Ew$gJp5>šdQNRPw'0QOc>B}j@vm5_+ ʲg'f(44Z/-GOPVP}}XxX~ѣY̊,3[_`c)O5)ljnG*jl+'Nx y.Jl)H"8!6țPN5^]N0=Sl>xl98K1xrY%g}F^|DXG_}Y 59,I Q+`=j5yV7)MZa@^B88W!F$r&"jN8I?d*ȓRb13.f =;a!Co$7\ GG?MF0D !ȳy)2K1F0>HJ+hs\b Ƞ=4= atiiYT'cy+5yN5$7w 䄾p h8hh-nǸ]KBj9FBWϞy[:ؔ{=3[)nZ;t&-aM4Nev;rb:^HtvKN@48ach-֍hR]An-܄b$m<~_+0,qnY{>r1BF/e^79y0)rƯfèyVR^ɬA}}G-dB"D{ay:',Ѷ.I}Z; ^>۷UI|=Wcŧ|{R(J iflau$SjXeY͔dԕ&G\S5 |>pYE]58)CT; cHA Q !Fn#X(a@t\\ u 8/ǂLcXa pBRs!hpSa섴l(@&:;^7I G9CV.0Rj͂Pvw3[/J0GztM%! :trVp>\L',pӓS7wӌHt85O.Zp89P ;HN&8Fkm~ox =$3 zd2Ƭ6^9{AQ+3ᘥUULv}ұ[ 'I3%] r+N lۺz_M wXuVC!̒hS#%E[Wz}DWnXK83;{ZlaQMSJIh* d-~ Fhmdў0-?}< 5]q}Z4mn3E9hEXV$K4)ߢ,ۺjߏǢ< d s^kw#\_rB[ #s: PrհNw se9<,/#ha%Sg2U %fx8XU덷0 [Z pB9G LlXO!1g@7 _e5Z7Rr7N )%`=1FZyn "ƈ55f1%P@3Y*fyg,l{(4{xV4NCeM\ЉRGpP*+ikUlidh7(aʬFd\BQ "@<6U'MfDSۨ 'T3p8FC0A9rH@/.Ѕ {AxcvSYV:R:8a׳itF/D?(g]E ~ >"YCoXwvg/נ @9 rE#J: 1݈}9uꎙPn`0V6\ V؜5n RB%Bԛ=3tg>'8՗R] M}=Fb ٗ&溜7Z-uGΘbTaX}t>9FhlO-gRs@!#[S9U|k~\x>'Ž1`A%Ɉ g#lt}> B˸^A8N !F eO#iTvFz9}"Rc GND"/bLD\ǰI 1úT BUsB'NXxT &,q: d $_rT,,jr LXdJd`7Ͷ'q!+̿@{)8S pQ_aXi[\%X醴Mk_@4!1@AOǜ(TacN/1WMw Fi4Mfc.1gDqDct!!/KIrT8@{0ϱ'Kle7!t>ML/EuC&"?6sN1J \Dh- D  'dKx)dVBS-RQE)䤪U3dNfi >꛽<,.8xBUyM'.a[یbҁ+ gYr 2r 'Fϰyyh3:?iJNB kg|:,P<^9 k_^V1P`kny'PuL|j**(Thhhh 1ԻJsB HM3V:Lv ΢ qaX%M z3GR |5Z,fzK@`es[9h߂s%IdH yn O*(0F_]8ypM0yйֳd,gUHfDS\lX;)T(a4ck0i&O#bS!,p;НpXanr[w[c `X6kY2 #VnĠPQ 3Cc.*߷mbFöh"=Ow|$e*Ҭd$8 kByXm$ț湾-_=fri[ny 'y( y? 
8Y`"iֹ Qڿ<f)e춪cCO6lg v 18,r(Zd#; #D FPǛPԕn_3@)@16ڳ$j @=`[ › dR?=SǴPNzܕ^7۽*j1g W:BGhSZCĿJ!-)NCbGHeu8j("֍u䱃0NiUjB\%DS?a1ɋ(XZZdiv2 ևּɊhBf %IJ-Y:f)hUv T>JTs2n1 XV/& #)S~=i!<%40B!,d3pb k6mpk_WK '75ANJ@u``3>P_MٵLzx쇗ٻ 5Z_FkդeO?R2\H 6)[|D~'p 5>'FNÂC_)'y( L" އFB)d[%(Y -RaxAXz>avj7z/V=yæsJwq\qhjMF=%g&qp`mpPWѕz" @>4Klosg@+l.}fu@ENM"`_զъ cer^< h5R b NY-oBs%*6^IO=R```v>pMfL"WϤ[jI4|ƪK6 =Tki>% 1ֶ(B_8]wp\dt9auBY3Ǩ.e}< _1F+a6 zvVg'rЉ%ZfwƝxw}`ݚwk{YGox j'X B;|ox\u%'tj24PP`;GAUh OFk`Գ&)GRI ,InRm eqv9p'']+z'T$2BjA:MNpP2P͋ (eІxPoų6G ^Q.Sj5['@Kh̊i!# "lfNBwu-9!rVN{#cF *z>hq›G/L^awX@^8 Up[[i<IF %9z=eC&3eVk3LYG1NΒrBSlU ?R^2h!/)̓0=3@l)xNt$JE8MAD岡N@(P90TT 6us q$hT[Xs 7J)<nTLiկbwN)'tIN)Q5;*Khؖu](g_ RcUh %j&\`Ԁ`2ƾ Y<ԅXd$۰@X1wI@ZF\C#X5H +uZoʾUV"31fXXz`֢@2ksv\n@0Ѯ1};,@}6'@@8!@N91E_:XVp~Nxhpm~v9!LzJq$PXb*ㇾ5H6'ܨ@ȱV<5k6@Jp2p#.5'т%v0E ą CUjxo`nB 俵hYز>Dp~+`JQddi"#Aa6bNV;0J#Ъkf"':[E'9RBJ(F+8h;f݉:*-&G \_PT82ZYm-񢶬 's@A[Jsںns:J.v=Q"V +7K8.y!m`CjYz 0L@,@ d4brH0vBufiv\i-ScάҒKt^k mZ i@^҂:15_'XO7`Ųg%o]N:FkZvAuݮXAAFZlHQޞ\ k⦥`B%H 4 VчQNN8! Dv]W8PB FXvLJc"cK͔|6+hNhqD rۘ(RIjZstl#n++DF#`!?KN h͂J+uFeWjFz7@J0vim!4)e)2ny~pF!!qbv|9BeYU!vPۺ`-E?$7,F_\bUt^uPcYn0ɮ5>!C%+pQ 6.'(\/P!6F949Hw~b3 ,0D TVj'O s,OPW^I0GBٝ*r >CMBIwJ*X*Y؄\h"p1ۡ]mL#E\'xvR~IJTX(av٤؛f!on.&C#|V/R;. yaX{RA?UEh-_9K ޿u5Z7:<Cʩ#乥'lYJⓋPh  .L'TU4Ӊhr'8I|vFmz)7 cw/I9OuIA E iT0;&rl,b䉑ޥ#-w4~mq$BNY8XX /<.VE%2[nkec͒C> - 8AL-9}Q;! |r!ֵiFZk6JMjjnMjK!f,ю& ҺQ<6##\3`f+F6 tgmEvX7:XyL]|J y>ͅL / +tNR0ڢ]J>*%qBK@@c 6ljE/"0a?t4B@ t |*Cme U,%RrBP\_;@ЊPyǡd)vpRhe:jSm14Ob4lpEAKbI00dD{BgF j+|W뫛KR^&ɟwC'$dDF"#ia=E ' l=g#kv9#H9 00}[aqlVvsO: Yc3-[ڬ%Bv߇!+"\Nph+ĉ @F2aΧBg$(-{@!&M24frzyp#Dab 83IWTfEo:/c"תOI$%HGLU\H}M`CXgKAWJ +D휰ˍW#'Dwuv 3̅.\mMæB<$l ֮M 1nJIc@#u_ 6S&Ƕ\i6JAaA{8x0V'4%7 4',si.~>`(\tAR+<j<̈́8'^E`j`_X![sЫ&%~w;'XFx B Zk~ Bs\!8δc#l9KPiT[!1PA; fhͼ`nC* y=dN|htH/md R~ǹs'Fb&,@s0B6%NZzF~%mMީykn6>eKךg^4N(.'jR?t:qǿa%on36艜NrF{ȆзL6Vrim^(" Һ(Jـ+8!~^+wx;x{śo1$ j+Yp\i}.>+ʼn U%xf 6;Ќ.mqP , MPpls9^`կkdWƂAla8H0l[|!vIvM>0}F  Ƚp҂Cê0Wk p熦3՜˺TUN` j_幏$P81)KZ"}Χq>By7bghs"Yy\֬B u袘 QaH]VlmN/1՜)8T+u_Cp\F oW5E 1N%kąPWpz<8ʋ(Y{F5%|"jW_f t( 9Bx^aHvG *X Ye$rjaQ ,h([֙Z-aE,1tHK}#cJѤt~i! ٰ`tc6It$d!뢧Hz#An~ jYB'`pQn w(u_C>$5oޜ]+pt9E6N(-ٓ?EVv*lsfXåAk 'õ̇6J0RvIXUo:XXw[!EZ\D[MM[XzN L~ OlqW10[TwIDW%J`Uʲr` BߍŠ܀WO`7 @{X5I!W@)5 djq*xBa̫/~x{q~sKz䄔C'5uۗ/ߊx>/[Mr5.ŏCeeR~x^ EyxZg_LNh5iO`̈́Pmw3\H5gK\m`>sid']h~kRI>.A6 &J%t }CIt+WY&;wN&/xvnD P7k *'F+''Ok2BCy^FYPfs~dI>bkqŻ<~} -'?Ǽ%㨦$-# EK*?@ArF^+'H`B%W*Ax-bPVr+ jI\4J7J)k)$)$`X'<ӧ}IRd󇏞?z}4GO~x{yW^ew߾T?{۷dm=϶h})<7x;ĵa)t(e(>^~9j4 1&1h]ߵ43ԸϪu}ktM&doc0ػ3N jkP\p@h9⾰v.ݩNbwYH} ʬĄ/)GL~Pp{sqqϓcӋ_̃~oTѤz7CY{*Qo(D&|87Aw_$۴`r;L  d \З3l+Oh2H.$(u:`2Q"wծ{ݱlnM̰Ϳj$t łOkB=-/sT$¬+azac%&kmֹ2Tfi2h Gp 6WC(xR6Sr˜ML-bAHL!aapYWl̄Zq"`(DW 'hn>>wWa>]}#eCocYFeAPcOU`BцᇟT! >cVan`4E2O2ً?1Pm_ <iF(h:f5sJ8<;u1*}Ӈ'2V; hwk"˶:1AY {:O QNhA0^)dVMQ|ѳsJuOhʬXe -uvi圯~ӽH)I+ DB"ke".D",juxRBa\4@ 05JWf b:Vt>ƚ6FH%rB{aa.1PQ!qyebP fW'Ӑ 82pu{`xt1tz(޷3|v )\1&xDRQ?.PyuNޱ쓆l!vI(pLI`zPP纫^ꮲ.)#ΫNZP1SVm8p\Dl]4gDBNh0aW@zcu :+mֻVѡ2+3u>;G?%"g#.e,BI>^}v.#Ăz U'Mc Xtoph+6Jl ZO- /8x 36`nj]l0qRWz"\ov?>1ڰ}m  м5kv/w_]e:Y D|*,z}@16`\@_iZv5-N V/bSQ);xdu/E0smV-N%J5gUpT3ZZD3  GA$~ C\Z '˞wJXF 5e?*$I *9XOX Ö xR^#4ܑޔ>lfRu}#nwk="9adGYv"{ ^PD#l](.h1ff@By\ nRE΂y@ UdT!Ao8E'P+Obcdc8GhC9W,#=@%g, Ph Oo'c3!}){Rb>9&RҺJWAa)g \lU6 PE0уüj*^|@0*B.Yy9aZɈ-s/Q0FTV((⚑]}%Cj>v6'I0u1ga)7I?MwLPZf𫼐cP0Ƅ{1n$YL@~gMիfsW juZ܆*&O4춭~w{ݝ9UuL"6g&=6Ͷf)W{L[Us\R"嚲=C^Wy)=4_U^\F1։|a]ya*p` 6ɺXN 4X{*cH(L oM3YN4C!}LG QP ~y XC. 
\((`ȘcE@Qt&D$5k7' ڮO0wh*{NẨwTկOH%)^{gMj-oEukYٲn 趶߼xŻxyʄcNxv%|:}tVi2o~KZ-sYOK7co$/Kn )^An"r{pX`y*o;O0ez\wO>PT< :3rUF2-,!7i,ͲUk 1K`@ O| Sf R=DP{?zCɛyXe" ,je`̷2!4163LXOLʨ=MvAڡgP 3U1!;C n}{JcdkVvfeTIYEZo޿{ȸ_|(l] k-Rc2Clj7{qIേrcs CAFJ ZnM',|eTGfl̍usi"Ǩ-a0:Ax˫+g ɷ"'h<:=aE R)}=$8Mq˂wA긾z`ʄIpǙդ9&ʹ簸BYi~,b 5\D WoH{+8g,WpG#,ItӶD&A޾L(NYw$R^ӖlAd9-nA`&pǤ Øm]so6^p A P!z{錑~HH, ^#qYm`OaB1[_erdx>^Mf+1#6p 7,w7dS  S,Pkb, rL KLzL| rGnt/ p'ex :& kτARc`˜210ka ߔ)^S _9Ƿs{Z#6 ŽSaeg#Z G)E ۓjƠ-E'_8!1'AW1Nb!8g,1HaG .S վ-ZBuL0aOxQv|1Ll<FPJ Ox8,w0:I摠o8`F *˕.6 :!1ɉ1d(Vtҧ7TP ^Aϵt LkTAơPoU2 n[ $ګZkZ; ku%4T?2%lwQ5_w"3EZZX0~3OWX%r;a$Ϙf&x$6^FD"2SH`d܈uLX9oZ1 Fr8HcM{ zaK`Bl&tAτ"`=!`XܰF{- tN8w \j)䏁3x rŝC wqS|0> +Lk$(4vеe[fBՌ!9@9w_w%iĄ DZ!bCns}-Q%\rw+aBr{OOtyëbBDն 5UlƑ/Ͷv#^sXbu$$0`7xZa5ʄǩ8c Qt7 .HPs93D(]"AmA&XYE0P&S ҆?m>}BΩ i\j)j aņI-|MCuՙA=&Ɯfw83!o+! gB=l>9`-u6IY{^%&`Vl"$Mf^}'THI߅_w?0Wo?ן[>s!n/`nì勪 xMų"q޷jnfV^%`8%#[]zO3,!~o-d]FA* ؤf[~lfC*NPp}!!pT(dX;rn?wSUo>sj*+ϛi[\ \P&]Cv3pf2_4|M &Mi D[Xo|}q}Pk͚:sjM;I ܡО7fk {Wzr ljTM<5_/usIEt&0^gJ1Y'ڛ` ʻJ02%f4EQ^7QBШ#\QVGy,S԰4Z#4$J#"Iӈ@mf+ (Zl,MJCϱz nPjZeLX_TLMNgZ2D-!Zo5eBf@Mqm8 ౌ"Jp-LK]&Ek1y"'< IiXc皡-(}TE!m520)`e~? x ZG1T0x!fQ(@y_.nLgC2(6KnBNqJjPsj~j~Z֘ʘ8pnˊeնo&JW#LfjˡB\s L!V~J>=wQ뫘^D8&pF_0< !p((L|$$L_ǣI{`#hM}/pF(-јlZ踩n#8%lVc{Elu]+eRr iOE>=>ῦց0r-o}&.~HO$e/$@ *t9"A?i м5K.iOv#TTiG-}nw+o;4쓁 (1{4|2kK$yv !ZP` d'O2D)edq`CA6(h?˅`kLi|X.a{xpk)h_Cз׻Ӛqvןoّ_ + &ܳpcXh* dܴaJXW0vV)KfDԋ5er6qMe|Jo8t cEI }n7$U\$0M(r  @ )$HYV&zb\{&ŗ9&$]8(B gټi&9I 2:iÈB'?3~Q֢ېQ5_L9F'V>NVR$0, Rd\a&T9krٙh ݞҨ_͎7 h3q\ɛ"P--pP Aτmb1, .%èL!@v+ro1s6.6!d߄7pb  }M8ā~? C˴8uji`")F~#z`E_ 1PaOBpncsr*v= Fkhŵr ?%^!/?vm#1!""^`/.sU&8ƀ ªH?~O4 8i2eg #f P`+Tྤ4\=eYO(K\혠QXQH#P;z\K)Z? z03oBA q("O~3LpP^22j_ayIqS P n%]E NF몮͗3 >W_E2IWI7A*āui9L!*qq2`|srקU"`Ie%gA֏f󁒹C&X+) CK\57(L|^:`,2 NS5f%4PSl|bffVv ܷbeRBaPz+ Vcy4v0A-~flp^ U\9x DXn&/D*.f[*XݰF+5ZRi׶ L*7<7 Z@F}n`®ya&~`'&?UtKښĄd)=$ ʥgٰjQd~'/ F&ѼC#='hBQ0`BAZ'O3ȶ=}㲿kE ^;8$%2Vu{p&tc L'A$Q&|^;̄ {:[{KpS)N5+Fk^wBݶ'Ї=__~WQ6bo"_XJ,T|Lh }ˍ KĖ$i}o|zgU1h6!,G4,TZd`$G1y.X/m :`,:}iQp.4gfLgYq Lw(x/f~/6nAĵ`ӹEl Lp6|Qm>:V̫F4ZohFFh_&^ۗ?;=%Ezu@^+.yI!VL3WOOߞ/^xݚE(i渞]`etLXl7ڸt<ɼ<\㽁CAzvQX]=``ª L(B0Ņzjڴ~EM)LCP(HIr.&Qf.0BS׷$j=?϶GɅd uLLu NBDP$Uq@W4Z4ZF} ەٻׯ߿ *'TQ.i{[l]U> ZT'Ge jI1W?o~ޒ,7o_sNξ{wqDDRc%q3D&)A#g,ti{`jIF&XDap!Ç"(?.ZOMZ s#OL(3מr5@l&<4v7,``^וRGlӓڜIOkhuL(/Fkhݴ۷nwZb㳬EMLTǸOLX4m+)}\/w3:wOs =Uؠ.U4pTpDPe%bY:x$vL(EAKTWϷ4KWI*!{Y O29B7jEp*&ތ@pʄ]_{n :c#&A4Z|Dm>8VT2#&YcNTX> >H̿A=' HUadc+#sWQ;ۛgj|yajHB C&q~ڻj:;#ܼtubqu5TL:^Za0Y?Sfb W T9+GVڜBa Jam *t9Kx ّFk6*~BZFi滺!&n7lWf[ KP1 5٨zXB}&8(wᱟ`B?A\,`cQٕ&Ԥq7zԍ ܄q a dkV2:8!Pn\,iCZ4ZDY) 3bBMZ'_ k҈ TU&Oq81Ap`Og K~;6?'|ͫOtȓ(B0ԗ [2aEb dDžY +TF #6;O($Nyf1dž9Q/紎;=ʟд Te`Ae'prDgE̘6A$v@V4(L S&#Mi&zh&kѺ-F5&=HӞء)WoxGf\1 G୴аa=鹝cB==WzΆuM1 n/߾"_rIF$8 9E`T9{eF;R~BHpƆMV$LH&,#D3 4R H <۲y5b\/Ad)L',<"—Qr&bB&ʄ 0߷lʄEsD#YDYf ߪQ4Uݙhu:֭j ՞S$xLT3<`#y&wBw>~D"bl@!l 0 ( L$ԌALR D>ˎ qA~OXx&A G vn!oA[ys 9"_pB~>~80"k& ثg3 J(slrg%*w<9h-F+˻4Z ``VPUP}'{z !< z-4n ,k,&ˏQ䝱}^GTc3 @Ca$GaИL F?@H`QHVk"gQMäz : l4ݔi#fV&1[2YDŽ1 Xe-S`-PѳDqN+*-'˺*,S^՝սNZ kկQ}\]zUQLU'P˟l7ds:,\-.i3]}O3tF NE̙"EȃŘ@G6 $k Y6H',M"aBW}Itg 9ev~4Dd6s\(Qʅi?&6^e(尢q[b Z`q@s ?5x($[8W1as'6!ĸ[so1c,`22&sNZXaA&83*; b'9&.OP0ώ@'C!4u+Qq@Q/K0d817LhC&e.VЍoȠTdzeZy #ji )A$ f r.? u?>z M cDWPH^xaqC*% ~rNBf5t3\) 5)x/'KZ0yLPC_4VGCC((g.8sC( T >Ðd '2@]ZyT 'y &@S7ByP96j L~&Aq [OhLwMslB$Nʝ [g'2ag 4`g T(YEfYL#0bc9K*i'JP$E"!dBLQ`^@R9I) YuT´3 R gAגp8ceVRmk)Ԫ x*F"2Oy)3,٢–`bG).ZT5RlM P ۾w֛_)h2 :vQjD3-d`L_0j&U&}lM;ﲰgOB`!¹]_ǖ4,[bfF=ܪouk5+7h& qL@uBq22"!a3z]a&`K'0Yd(oK˦T2/2:~lֶ.+mĄ (.y4}:m)GNUϿRoέ>6; NZ歖UZV50k0A;Q? 
̙R;A&XPDD0 "9i·~>P(Ii٥# QD-z0>\,I7?h:N0IRN&xB%[BO{|0>(&k ƌ6pAR#)6Wr98hw9f"V.(7,f :z!0-IP%(}ֲa ;޵%ʾ P(`(]eR(@`&0=Nz@Vay&ʍCJT< ֩\(˗ʧ\3;8]u7b& ޝ1<0e%e(LS!>5MzQ[E{;* XTNx$:;d hHD˻ys@W4EV0p*)ЭeCkEWV-@˗i aG/&02BĄ48FZL"^ Lg Vd%_{*z|49Pt`lRh&9N g(psJ;G IVE >{zm^U_S،6ξqq=+v8@̄oZxl0zw vgBYP Gk^ߵp%L`߂GVK= ۍ0a,_v+t&$H; ŎNHI@_2bT1CwHc5MNvHo1TuF:P tq*$kнI׳@Wc DJLتVEC6-N0A .!aMPO*;`'uњhx;! /(9OW-!߻Ʉ %ؕYZP\#d3hW8$5Z:h(W2ޕ ,pD3a1!%qga *@Qqcl_* &`bbLjbNHi҈ ka?@]q6{ݛ(τ8OO*9=VN(D& 6ڕxe~B!rVmUlNp ג1έDd,1aHX"gLmKr0`G`Ҿm4d L Nd= x&37se q~f FKMSA |āFIXg8"XIeuhv@[ p`!eAB5f9Z0e y/z돟8wՙI OQr*fBAx920, GL< ]evTEHK+']]* h)0ϣ *Pb"$^ߙ K'4?!!,jZz> lP H M&F3 عVN @g,41;D:p+_{DŽC6{-zj+_~|/xlt"h^)&c>p| KU־cek` $_o?$#<K(,a faM0t`g `f7YbXGz ֫ 9k 1(r0jZ ` h, >/'07{ ;ݯӘ ycֲ'b(0u#9ZOfpoe!'xd;Aq3Ā ww&m0 evk* <*h)Gk`$*Ihѹ7m"^Kz,EŮv_V3xɈ &RD(sʱTX/8A1Nб6l>3*dzH6Q2P )C:GKEh EF@YkhZG|j0 @k3M+@d L[pY6ƙ@:1npqߤP稯Ph=g5o.GsN1soh$mwG~: @!8#=L1Ax$e&T0te`P !0Oh0!v5\nA1) ɊyJI%`#8Y&d*4l|H6 Tx4{%!v&x (|'D`$WTJ &;?Pұmw9Wm?G>wW:{hٲn 6RoC 7!vHspOW[ u0Q# LXg]kI XY&\l)L+Q,U 4οK- p,IA .# 2]HOpZ*.0u!`%tT1ޘk*E&V |g'4(GNN8Lhʶ^^CRC%L(MZ*jp$䇫]m6j+JA]&.q~%Ihh9e)<h z~M3a'.Fچ"OØ?X=ֵ,\Z#1pϼ 2!< ʨaʴa'DEUs׫f|m= '383%ܡfL.OhSN%ټYrn瘳&?>wlN<ń@+ gZNZ@C]8F%üz'96O@h 5eȍU !&41 psZs^TQI؆P|nn/y>]_F%8mM9Z8Ѕv™&oy\!VHE28HDVy" qBxG׻dU$vk>hL1& P]HQ\;QVdie&j>Ye-f^# *UBgi׏{2^L9 Pz,S>MeM 38ZG4MQvl%$EJu.fQ:UUi(XEBL<#yfȏe2c?ߍm Ug$w(lz ?!u `ܵ^ mj (XU\]8z@ [͆1y@y%Uv~ Z#&PBhv3 Uh?K=M0_{^ WcZp׷Z p"KX 虡v~١X} sy m"%&‰5ŊqH,Rʢe2ZzUhb4b, 7(eO.TK(,`hri*k#55e|X{ɇ֧Hu,841z NqL&0|.tVggk؇Q_{}M2B94(G%_L*'=3d"M3:H|]mz`:aAü+H%8IZW_&Hxt`<1uv6 xWGsaPL<, zν#v4Uc\.cB. Ӂ|g&-`F! L@%:fa>9 h_hP=xìuȄrtTjuvLb}_'$Lc țog8f@aҡhlI(t( |dW5RuZ  C'a31qCoYڳrv ds6C(2x"  06 QUV B:8]-=6_u{C̀,[w"hk<޾2À,7*e׫uJXR H?4 2],K B/mQb 8h?D:9 ׆.ya6^ReĭhB!VJ <:;A9ƁBH혠 Ռ?x:&/v uoLk{ɸ`=Q?=Y m>omd-gvXm孚當 rW+V(!L2UW rx` b(1 c\ jq--_@ZP0pqx2g%5.},gZNl¢'*GB"6uA(8drAT=L 8iplWm8In%ӃT.H'`%NbR\Œb,c<7g퇟mGbɞ?Q_^~f{5?Ӏw_WJAJ 띳c><( 7 M{'c!<_"MPX_,\)36 %@4`i7@ԅ(A#B "GB3&K"BJy%zCSt"5x\H AjEjݽXz<l6V_#^TWfąGB!|<Nv8K!x_0`x7O/hlo^뇟^|W=Y][Wuyf Ш T%7 NC# H{@ ^L8ѵb'r&Q ݹl8M!LrM?c߽~Ëׯ_mt?Fz~Ow-_O\HxW*5̞_s"_]k Sh`B!L (ϒuCCl _/V]x\ŒrKkb53d8;\>~$#H0uU:L VAõ3Nc"Dǐ3sDp6B|HC"` e OF4 u'_1QuN1NB18!z*T4 5G+eU,[{~6AΔ5Irv]A `@y>5j&dFB H!OL3nC̚Ehݸr(ealQ KBZ)2g44L'^_Y,W"(K&'$ Cl(T؂PqIJ6 _(21K^2ICA-ulF0tU7n`FzEг`_\yU]Ąp;EoT s~Uݱ=9ۖp"~6Ըby8~5tU-eEs 3A,ʾVZtL?Ղ?VELN1ͦܦ`/hF(lf*Q @` *o\bcL0IH"|@HA&/EBI`e ]&@K UH/*h;ġev{3$S 4o,x?CB;iL8%v@vB:T(9jt`'zo}"my[_Q^Ƽac\Fj-i0AUAEwͯ@p䧣pϚȦBjRK$_X $R!c&#8рP%e3CαH;򞱁H "= aǻo,7児 KJBBa;Ʉivҭ@QA& A[lΟ7ۯ7_c;A 8%NBr0) њs_81٩Y{ySU鯫6:o)XHy& Bh;9v;the:rHe^,PdB, MaZ*9EQj)d}k::bj.3 LP$$)las"Ƥ +Io 9  $%֬q`(,,0O'R8,z ej}񉼈gJL8w(mmܾw=!HL ԯ5_k<dz~E0dLcȇ[8 uYKRECYt\Jn!!?p4`2lrtkHphjoн0gQlH;"Ӗ+Tˆ:t( *}ˬ [~, "*Ķdb+3!j;PA " Q|HVӧ!SUEiիW$uaO`B<&P{؟ ]!5'THPh,"m &jnUTٌ N GXv`R FREހGk}4A/UhVDS : RRaDlb$gH)d$O;gE}{LR@ P] ߸9&>GU6ʰj-|☥cw.,m\זO%8xJS ]5 M2eB]p:"-`;Ș0qG$(ՂS\BǰD8bG2W K)Y_I*f Lv:$+ [`DD3 ~|WSPI'UC/1t0Zqןَ_B&<6w@0uT3㩠+^  ׊#c?ãoLh=[M%!Εc-:惿nh$l8.H ٠р NPCM#Hkx wX-85lu2W%W%*' ԗ(|F3M-3XYJ0  띌Ip=e Fz6H@sQϛ*\Ӫ6V={LR=OL /64 kK·漗 Pf p;2qvj - FYBKW9;6W%?2dR`My[7\7YFW2 %K} zp3NP?!t.HT;Q47Y(f鿈 Xsǃ2]e79-1lu#T 3L'&HaCߡְ :rW} [$L#S!}RA}Ub7L} *xGFL7`z Xqݴf+Ɇ 8W |cƿm5 TP.L P?DPE5z+Dp\={ы{- 1zP .F?8r %2G" X .0 1&crرʅXȽuj'G2W;^([TsA Thl,2" L dct gc߼/C\wq'?"2T( Kԏmގ@g Lt P6cP3Ca AK8;}т _ cr2δvN;y+ 'Oԏ![o%{y%MFû6MP# fzk$u r'ᄳF }NCL詪{EAPakɟWqXݼ\2~kҰ1x~($۟oZ:(JZ|w`}^@5>!^ ,x$X&~$Z,Ѐ-Ag~A ! 
(<ߡy(U V WI,A;`m 苜ЄpJݰC÷c p8) a65x#dنn}Wv@]ϯ>|>UM4KoU\݈I\eC23{&TܓyiK*4i2箍_5iJ0a'@ Cx/FlH֝ASY嫷-,O~n9tiNj5KVj)38`(!5kmQܙw҆hԝ "AJC·$* d'Y /̍R?\m30r +pS2˄TEHK}|„`.\;bA@DfB & 隉@T`Kemz8HRN;Ąd[ީDxc 0!k_!^m5kql*Gy +Ӭ&Kۀ #?r) ךߴrL6O(MQa%c,Z]2x%01Ě@ۖq ORm*]B  P l ]`Yb&ؽŇuCREwdC?vԵ`'`Ka@!#R&C&xq`\tsW!g+d4q 7nP _@P8M z +AT7\JhsW[W7>IW9/_ s~LoNduTLrv{!"7ox۠!L8o~k l?S~Mq"iyLh#&::e0f2l\"~HVW?WW_)/T!&|V'kWך:O)UPBA9ncvD&pL&i43>g4'9P#Obn~$ X؆;:" %4tEUȱJ` 5ٳIMi9A2 H:-^],*0|_ʩoAa_m4<OŮ OVpQkLX3M0* @l#EaP=V'.Wp\ Ad<8;Uq5=f A v U+Ap2\i8hDجqw^jyV@.d9U~p:5Nz9CeSNN4M:_9p UBS]$AE:LPLkؗ(%G@œ[ l)Z|mQL4PAWR'D2I,Qo@ܯ^1 w`!`1mA @o}Y1aBt(@ F+NP\b'j<>:3VP2yBUQʓ2S@[W'jH\D_/E2;@_ݥS,K-UP0P 1Yvky+|}&u<ϕ^n, uGW '0a Σ,QH8]iJd%b)5,, hP(d+nk3M{Ï*.؇`_jR&I["X*ʖm 0|@r<"SF2]F͈VVdO)_ba_}Yu"JQVLO؂ߩɝ`B*w{8j ?::ła`#\vIďpNq6cɾR/YLc!br.h3qcl}vPFbC峙^8ۈ/7 Ȟ[Aqݮ3.Q`pVsyBtISDQxL+"nI! v+eՆ5αpiǗڱl*>3CӠ ZTj]C]j3Y0-Cա :*[*AX*2]Qꞈd L\fDJC1KQ_H1aǸ_a^55{COa*P`B*pr lv,NQ1C3&r黠1w(NiJd ⾧:=r߃̑ySI٬cuLʃbLEF @BEڒ"x>"{az#E2 xgʈ2 .J`†190ISX/ɗ3.YYgmCA$3IFmNHl EciКչ`Nm.qb `1z3t֎/4A <T,XZyH&j6+"b Hꂠ=gF-  5!Vʸi+Y ̈́O_$aͰ%gVV+BP*D x%xtal?rVP>g)׹ Oh2VZŖ '\V{ozXLqb3aiX nzQX4 㪺 #p2gɄ0ȄSVmpDAP\>_nd¢MJ `%C.D.q-",8iHv iʛN_$ʺj䯪>+xB}Eá FƁD.}0EAi@NC@`(L$Rux c]o *4CV4Pz㌕cBA2ĂPo%E]ĨR8B"Z$!'(1B%!&l',A;w2>)VJ o;',\0FkbHfI#n~IfC̄; Al鉠't\]q $A űID- v?76BPڃ7UIUawrA萚P7`P$(<'% yA%2W0!lhv?7U=ʄ!/6^ /K2|VX2a٦9,ޕEtF#JGϝ1$RxăA^ob?0A:. MaXZ[5.dY6bB%LXkjƠ'l v]'xZOx`lY&9Z+1u! \(NApHbB 0$Ah&v(GAHaB ˓/=uXgE$0Ë'cY"Pt lJP%CzfъŎ ja5▄ "@!D2 `߷(HuB0ʐgA ھ44 |^FcjOnÈo#&dMlVijEu1F#Dxp90 Vl.r f@a\lsS}BlQdXɔ@H^G с@{).L"$D̃$ :M":Qa&M|[<:*L4.ѪBˬ&|a;6xniO6K+, )>0DZ=!T:E~B"\^]&ͮihFr * 5^}9Z/)#\8CW3Рp,b ώDmQaSc|E %7zkY,H@Hw90 .lt*bA{#Rz `(V3|#w Kx{mpVCHA,,ȧ0 *ʏ* 2ALؚ&roD@ѡvjW*6 hFou7G+2ACȚ:=nFE\(DB GL8 Hn5A&IP #.D*N`q'΍r6Q>$Lp({@Z 'Hpi74qڱ]!&0>mQ2:zy?f> xװZC U1=̄ {nר}GIi pOʩ26uq5h}/ux\D;~t^  ׷LJ//r_IOᓰM )Th"dh$pσ^0FC 888DGv:Mm˙C ,l6u(Nr7Xe~% [ [wbp G#c{IbAPrA-[dB!{ *Ž6eUD/X̄ 9Z_`eyLuL'/VJyzȋ!h]4]T$>T1ᖶB_Q,FJBAy|;s3|%"`+0q,H'$ yGe6hB"1>R(8̾&rhzv/DQ BvUzdhRTS,tWAY< YrAѰ\(4,B B\_iA($|:( uU <5B hMh݄s^P!~.sX"X]7Ie`uXp(`?Cm NQ1 2 ,G .sZMbف!3}i$`)`bB+L͸{'59\9,Txf6|hoA4 W 2EBpBl6zBCԄ osτNAPW :$Ui0aע/GWH|Kx^|Gi Մ&Q̊v Lp] hS + #2"xB@VqoKg0s;R4d8r4k&Ą 82li\$( k\HF6mD8sv @ڂM:Iԯ#t6psDSX9c(ģ \tUB*{ Crۦ٢{մHP{f@u$\ fSʤ D9Z?CQrP Rja|aBQ0=0'fchKmD H"KDՉ]i*LLiRX؎E3~,s %=<2ۢP}=TCX h[Dqc'~hB>&¥$0fW g#%Jďbb̀ 1,4"w@۟`O&4 P5vJdL(PQ0G}N_82~g,H )eNS1MLXpL# &HZq9-v BAb~aہ c0 y*d(yP Z|$H4c \!Ka&TچBȘy~Wr5#@1lq W\w;-M$1!BuB9cu6o2!u@@|%T2SBZD+w|CZndVl.HrXb͙KRoI/Tʋ3RA=Ͼ'_r)υmF`.S\{d"YX1L .$/MA(A @BRm0x%Bᦀ/eτ @aݯ w{oR:"L?LgS%U%*FYr Qt`wU)nEQ2)6,^-Cv-J]ac)ʁ_P:ߪvX[Z eajTsPA=A??! "i (YTh™5ЅAZ6֩|*hhN.Y6:">?qiW=naB!#F&\#\:ZS<¦<_թvH(Lx) eH(/@6p"/+ /{#ߑ(Nvݠ&ްj\OmDv ͓i ]npjD Q*Z {"/x=Og1f8syHb5!Jn$gZFN Bђ: (bˡ۷9Y+Gw *4dH(j{ve5ܛ9^{Tv*j RQp6 "qA^YI '_j 2d{2Ǣ6t$ࢧz ՜rΒĴv& <$/GB!I }g $ xC^*Bd%5g[{z?ft~9L^|ƾYd n8 ,t.U  zBP'\,]POX'dr3jQRKt pqF80h#ކ%ds7F1/>PK 2]ZJv˂˞);d1Vȉ<"<50 ՍaFv5acvI ǯ&Lb' f5 =ql977ӆj }p{<\XyL|@R0/~ 9QHJ6K@Sr UQp~աB\}0k b:xen^403 !$u RPP(p: P*7H)GZu:=pUp19kؼ}3V?~"Hmޤ$0pAVHYyyL_EaA9T3"\%[BuecwS"+*7;.ױu6tueAGU:  B z(B>Jd CHOi˄pUY]?| h~xxs~{Z%⫧OL?y|O?W* D*t}aJH2!v4Ls e8ٜV*KQ6 #hN`m:s1 aq6*Ñ #dFiAmOMQX N8QOOHQ)JZn"ъW$k}/`5G|ҫP֊|exS< ((VӺJlc`#b*6KM/.ֽL,s|Ҟ%eYBp><.lRr09ۑX"g‚tyԗBRP~ݞ Ɵj) .5LHN~ys^&!TovP<5#W| 2ȱp ȺE iYf`/a= ogu}m ivȂ;?8yQ5``pFY'!A J@KY5BA1`>V}S@  /ܫ(+oYT̷c4 bePlĜ3 Bt5p_ r{Zie(!&H{T-͖}Q(z!ƃ.{Q ԶLgg:'  C+nV 00ә7r _t7u֪ n"ACk$򆸭sLUUHAR )W"L0#E%& t D d%sϓkݶ秂}aEA' w <\̄; 9Z̈́^]LH iPZ>ؚ. 
dDXiQFPB2 v&ؚRA<AmU$z[ xF l7b:1-Gu}q94=X㰆 V]Qp݉"\pƕgi4vؗ0 'GkDL~lRnkzr Mfh]m!Gkvz?QկZ\bH6~)rBh@I]h<)Qzv;BL`MA,ס0p_Xbx`!$#]|5Ba]+;#U_o.T8%_RQMv6L?*#Whipk4$Gkz;̬{5_9lQ`RLi Z^tubwiAMAA$4{KĄD'!ĂQՁc881\WŚpA&4@:nTrvwD]χ3`yk/sDC0s֕RR^^IB[ Td "r/z>vS2In`tTp`NS@DM"ī/*ߒr.&TNC8Gҋ=A!b~CH@߻ŀ# (5A~?$1 F N4 L7ra{ Nπ4ihIRPHhŭ>WΨaApIH`"1z™#K!>GATqL5'׽  |$RQ8KA@U'0A"q?G(hh6`5A-;ii!h*ͳ5!o')4ExJKs32PUX,/?2} )QaB…/+0 RO t8yOdCAN(t' aʶADDDf( 5{@zB/(uA䅾 ,Xk#7cܦwh2C9zh0 Px~;0d3+ ccP@LQ0y:(ItsYe!>>M I@x<ୂBBP8MO(5cDѺϧ6ϟ^ڏE][Ru&knv]TE# AP>'˦ګ;KӥAmju5w0(\gʟ CrbSju;1)ĹIXuAdd!CAs}&j1S{R {Ro@ ml cۜEs/M?/HDB=}Fvӭk碂 l4H^8YQMf,P(aCA| Zp: Q:ZTi!yY\.)>ٳ7CAT"2Jw.'|cz]*@PEOaIY#(g/y>޵*Y˹{%׳Dn9[PR $[q> /g݊33J|m\|sRWJ5 ѻZǃh=$30+ ڢ%) /w\ IW0zp$0bQPD'& P8Np1^fHjo/+j ZqAEt.nZB6#2oyD`fWO?깪;LeB ܱ~YT'}Ew\/WOTQO;3j5];&J8jP;+ f} [ ܵbe*u}h\mʺ= l x>[ T/;ZHđe-dɼH %"vР+!YqYY-3̐kwpNC3E*Sa 6H%Y~Vy4ZL9_k/ 0Xd7X硨JE1+ 4O2F\4-`@Z PyRIVR.N?y<kҵ1AWz\q¾'ыףDOD8,E>3&hsz쁘 QL{(lRc^|')^Ä_Ąf~()J~B0# ^wVP,5{qn7[ Dn]|M<Žy|s_LPͤ/DY|-%Wuj{RFPe़1a,0u{^hZkF>ՙ`iQ5{7BykV7djӿ(xl8 s{ wRˎ{h}iu{C%OE ZEoqB51}% 8NG0a DȤ|+nN@HKv)?އ 81aQGҎ8H]н 8qggxP4,[gq } ` /E/TA3m]ّRcN Y}r `AEc0YjЛ{ъFM@"m}ٹCgGtS_ {E?R*9FQDVaj!+uaDèItT^j%< m*P@ Ѽ?u'y?fe4-~tjΥwRDKByPxO}9F8`qBVQAzܣ(ꭾh%OEM29]jHzG0t:cp}Fa`B~ڨt:p}>lcvN'4[1*NՅ>qp t ;:LÅ0)tOxΝlݮ.2@0aMpVGJLy i-"yZmSRj٣ պA۩aFAԆ>c†u܆ȐmԚtn9S(-L! Okݰڗ)D}ၾQWdy0yHQ( (Q6zknX__ϝ>bu9tu{5(#ʟ ^v>45!+V}Oߚ 680ј%+ߣUI?C`Bh]i*zםA0o &P2 sX>R1FƄ:JLAZ&/b}@6!JX,*@2L.[0g 8{>+``1A|/4q1!Vˏ([zZLb @Yf1!Of wk \=mʕvZḤvPH0uʜvpD7rW|0:%u Zz}0SE6lLHIa5)EAнj4ɭvo4g_(.Q6YҀ/1PN1&p1^Ad~5 #Ѽ7DϘp5BiD43w).rw=Ac'AOLG$#ZK֥h-G}zi>U Q-|Ҍ bBQ;5Z(>lpuN3lptRϨ` + ) 8"} g;Kp ; H58g@$G%]l,q6KHǐd9tik?*&c}٣f(N8 &zP9C`]<Mcyu/<s#qzWTVQTU؂C3Cd{" (qB* ڈ>jqhS N0_Td';߄ )k9F|cEQywi ֺ!, O)n_uL&aq^HaZ4[.e/sߣGf]KaʙWmڭϟs1a&wEuztwT?m8snAhI&  fTЌ#&Gvr &Pkp{k;_RY}=k{_ޣjw?yfͰǘh˄a1 &h' 6iƘX}F(-x~ƫ|JRR Z`$V¥D3RFk 0 ֬?NskN&@_jAcYkSƗzpn(tĺ`9[`їS[b%$Spz_4 !u+ab0P0A8O&ӘPњ #1&4=ZCۣ?t"%1 =)_/ߐq}L&F neF18p'ʏ Yv  v]֏KcD ;J_MN0 LIP˧-& -XL84ЊbS qA8*Kl6M$ &$LH &D - Tl5}X(cN&4) tiAsGmw&4`M6Z$a'Sl}O!9X3&D#8bPcܲʄi!iLI`£`BC0a qﴡń:e |c"KR^Y9YZdNz(|B&{Rl0LֈZbZLG Di}@ `1Rj м_o1md U:8cZ1(yWG(|ٛW05dO rgҕۖ5dܠ+-&l6q~VWip# F~Gp{:;_Ma•`œ {Tn(ܦ؈0*x$TVsJWc1 &8A7R ѧ5|Fދ*$V3 S+h>>{379C"I0vÄɕ ٽC.">^\g&Ƅӷ)(E8ަWzp=Z#ۣ5 MnY N\M # c(L0 :?7 Vo1t2 ~'"3.=aZ1ҿ>Φ~P g$mȾ~q^;k'`Jť0__#Hr Fvlrnc'|l/?m/+ MAGJ+!Ҫ r5\0agq`0>J+dQh$"uQd:3R鳄ncA&X z!Nx☻R"0MnLӔE!ՁEhvD0AhEVR’ԩb49yY"Z(Ɂ@J0a(& *&Ē Τ%Ё47Gh_ &0)`BjJ'cH/hU3\&ۣS`շnx_@hCouv%&$ZdI #ͷ4AZ0d1bQ¢~T ~:HM%*zD ;D|CU0KsT`7;)`|B a8z9A`B  r0*fյTm=!O8A>ڳJC|˂Sp'w`Npx˫a`63y>ZRG0j(~(Qn; mXͿMHÈ6(%7N(hh q8'@[PB;A%u\E ڿ.̄{7Iȑ°ۧ l{ú5ja'5}`v=a ȹlMjbdȐ,G^uhޟ&)hEXϗ AHC-H'p'I'% ʣ;'/]iR{N@2e9Qy>og8p&<ȐBVxCP ˵(lRXTk="uQ'օh"CB5AY&N W>3akC(5Hw\VY+9jS,pA ;*-bw=l׳/ OV7[?IJh~l.fVA>JVfCzawǗlpE뉽ls/ &[zm԰OVx˔pܞ c n`A? > ʡHohMMkzz/d#T |@ 'AWƧe~O#1 q'O]iRN0 h~. 5Z4,-.$$ɠ<]Ǐ˖d^JqH~Zxt&+e<C)& A&|+Q :}@{nYL6xtQA&"Fy?bc-=/=)llN/FO859kZU2V|Cm劖st+tQQdOx#ڧTu^XA RN9!R>~ͷ% t|uG}'I}5";rV CC1]! 
[binary image data omitted]
sqlite-utils-3.35.2/docs/_static/js/custom.js
jQuery(function ($) {
  // Show banner linking to /stable/ if this is a /latest/ page
  if (!/\/latest\//.test(location.pathname)) {
    return;
  }
  var stableUrl = location.pathname.replace("/latest/", "/stable/");
  // Check it's not a 404
  fetch(stableUrl, { method: "HEAD" }).then((response) => {
    if (response.status == 200) {
      var warning = $(
        // (approximate markup: the original HTML tags were lost in this text dump;
        // only the visible text and the need for an <a> element are certain)
        `<div class="admonition note">
          <p class="admonition-title">Note</p>
          <p>This documentation covers the development version of sqlite-utils.</p>
          <p>See <a href="#">this page</a> for the current stable release.</p>
        </div>`
      );
      warning.find("a").attr("href", stableUrl);
      $("article[role=main]").prepend(warning);
    }
  });
});

sqlite-utils-3.35.2/docs/_templates/base.html
{%- extends "!base.html" %}

{% block site_meta %}
{{ super() }}
{% endblock %}

{% block scripts %}
{{ super() }}
{% endblock %}
sqlite-utils-3.35.2/docs/changelog.rst
.. _changelog:

===========
 Changelog
===========

.. _v3_35_2:

3.35.2 (2023-11-03)
-------------------

- The ``--load-extension=spatialite`` option and :ref:`find_spatialite() ` utility function now both work correctly on ``arm64`` Linux. Thanks, `Mike Coats `__. (:issue:`599`)
- Fix for bug where ``sqlite-utils insert`` could cause your terminal cursor to disappear. Thanks, `Luke Plant `__. (:issue:`433`)
- ``datetime.timedelta`` values are now stored as ``TEXT`` columns. Thanks, `Harald Nezbeda `__. (:issue:`522`)
- Test suite is now also run against Python 3.12.

.. _v3_35_1:

3.35.1 (2023-09-08)
-------------------

- Fixed a bug where :ref:`table.transform() ` would sometimes re-assign the ``rowid`` values for a table rather than keeping them consistent across the operation. (:issue:`592`)

.. _v3_35:

3.35 (2023-08-17)
-----------------

Adding foreign keys to a table no longer uses ``PRAGMA writable_schema = 1`` to directly manipulate the ``sqlite_master`` table. This was resulting in errors in some Python installations where the SQLite library was compiled in a way that prevented this from working, in particular on macOS. Foreign keys are now added using the :ref:`table transformation ` mechanism instead. (:issue:`577`)

This new mechanism creates a full copy of the table, so it is likely to be significantly slower for large tables, but will no longer trigger ``table sqlite_master may not be modified`` errors on platforms that do not support ``PRAGMA writable_schema = 1``. A new plugin, `sqlite-utils-fast-fks `__, is now available for developers who still want to use that faster but riskier implementation.

Other changes:

- The :ref:`table.transform() method ` has two new parameters: ``foreign_keys=`` allows you to replace the foreign key constraints defined on a table, and ``add_foreign_keys=`` lets you specify new foreign keys to add. These complement the existing ``drop_foreign_keys=`` parameter. (:issue:`577`)
- The :ref:`sqlite-utils transform ` command has a new ``--add-foreign-key`` option which can be called multiple times to add foreign keys to a table that is being transformed. (:issue:`585`)
- :ref:`sqlite-utils convert ` now has a ``--pdb`` option for opening a debugger on the first encountered error in your conversion script. (:issue:`581`)
- Fixed a bug where ``sqlite-utils install -e '.[test]'`` option did not work correctly.

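A minimal sketch of the new ``add_foreign_keys=`` parameter described above - the ``countries`` and ``places`` tables here are hypothetical examples, not part of the library:

.. code-block:: python

    import sqlite_utils

    db = sqlite_utils.Database(memory=True)
    db["countries"].insert({"id": 1, "name": "France"}, pk="id")
    db["places"].insert({"id": 1, "name": "Paris", "country_id": 1}, pk="id")
    # Add a foreign key from places.country_id to countries.id using the
    # transform-based mechanism introduced in this release
    db["places"].transform(add_foreign_keys=[("country_id", "countries", "id")])
    print(db["places"].schema)
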
.. _v3_34:

3.34 (2023-07-22)
-----------------

This release introduces a new :ref:`plugin system `. Read more about this in `sqlite-utils now supports plugins `__. (:issue:`567`)

- Documentation describing :ref:`how to build a plugin `.
- Plugin hook: :ref:`plugins_hooks_register_commands`, for plugins to add extra commands to ``sqlite-utils``. (:issue:`569`)
- Plugin hook: :ref:`plugins_hooks_prepare_connection`. Plugins can use this to help prepare the SQLite connection to do things like registering custom SQL functions. Thanks, `Alex Garcia `__. (:issue:`574`)
- ``sqlite_utils.Database(..., execute_plugins=False)`` option for disabling plugin execution. (:issue:`575`)
- ``sqlite-utils install -e path-to-directory`` option for installing editable code. This option is useful during the development of a plugin. (:issue:`570`)
- ``table.create(...)`` method now accepts ``replace=True`` to drop and replace an existing table with the same name, or ``ignore=True`` to silently do nothing if a table already exists with the same name. (:issue:`568`)
- ``sqlite-utils insert ... --stop-after 10`` option for stopping the insert after a specified number of records. Works for the ``upsert`` command as well. (:issue:`561`)
- The ``--csv`` and ``--tsv`` modes for ``insert`` now accept a ``--empty-null`` option, which causes empty strings in the CSV file to be stored as ``null`` in the database. (:issue:`563`)
- New ``db.rename_table(table_name, new_name)`` method for renaming tables. (:issue:`565`)
- ``sqlite-utils rename-table my.db table_name new_name`` command for renaming tables. (:issue:`565`)
- The ``table.transform(...)`` method now takes an optional ``keep_table=new_table_name`` parameter, which will cause the original table to be renamed to ``new_table_name`` rather than being dropped at the end of the transformation. (:issue:`571`)
- Documentation now notes that calling ``table.transform()`` without any arguments will reformat the SQL schema stored by SQLite to be more aesthetically pleasing. (:issue:`564`)

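A minimal sketch of a plugin module using the new ``prepare_connection()`` hook - the ``hello_world()`` SQL function is purely illustrative, and a real plugin would still need to be packaged and registered before ``sqlite-utils`` will load it:

.. code-block:: python

    from sqlite_utils import hookimpl


    @hookimpl
    def prepare_connection(conn):
        # Register an extra SQL function on every connection sqlite-utils opens
        conn.create_function("hello_world", 0, lambda: "Hello world!")
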
.. _v3_33:

3.33 (2023-06-25)
-----------------

- ``sqlite-utils`` will now use `sqlean.py `__ in place of ``sqlite3`` if it is installed in the same virtual environment. This is useful for Python environments with either an outdated version of SQLite or with restrictions on SQLite such as disabled extension loading or restrictions resulting in the ``sqlite3.OperationalError: table sqlite_master may not be modified`` error. (:issue:`559`)
- New ``with db.ensure_autocommit_off()`` context manager, which ensures that the database is in autocommit mode for the duration of a block of code. This is used by ``db.enable_wal()`` and ``db.disable_wal()`` to ensure they work correctly with ``pysqlite3`` and ``sqlean.py``.
- New ``db.iterdump()`` method, providing an iterator over SQL strings representing a dump of the database. This uses ``sqlite-dump`` if it is available, otherwise falling back on the ``conn.iterdump()`` method from ``sqlite3``. Both ``pysqlite3`` and ``sqlean.py`` omit support for ``iterdump()`` - this method helps paper over that difference.

.. _v3_32_1:

3.32.1 (2023-05-21)
-------------------

- Examples in the :ref:`CLI documentation ` can now all be copied and pasted without needing to remove a leading ``$``. (:issue:`551`)
- Documentation now covers :ref:`installation_completion` for ``bash`` and ``zsh``. (:issue:`552`)

.. _v3_32:

3.32 (2023-05-21)
-----------------

- New experimental ``sqlite-utils tui`` interface for interactively building command-line invocations, powered by `Trogon `__. This requires an optional dependency, installed using ``sqlite-utils install trogon``. There is a screenshot :ref:`in the documentation `. (:issue:`545`)
- ``sqlite-utils analyze-tables`` command (:ref:`documentation `) now has a ``--common-limit 20`` option for changing the number of common/least-common values shown for each column. (:issue:`544`)
- ``sqlite-utils analyze-tables --no-most`` and ``--no-least`` options for disabling calculation of most-common and least-common values.
- If a column contains only ``null`` values, ``analyze-tables`` will no longer attempt to calculate the most common and least common values for that column. (:issue:`547`)
- Calling ``sqlite-utils analyze-tables`` with non-existent columns in the ``-c/--column`` option now results in an error message. (:issue:`548`)
- The ``table.analyze_column()`` method (:ref:`documented here `) now accepts ``most_common=False`` and ``least_common=False`` options for disabling calculation of those values.

.. _v3_31:

3.31 (2023-05-08)
-----------------

- Dropped support for Python 3.6. Tests now ensure compatibility with Python 3.11. (:issue:`517`)
- Automatically locates the SpatiaLite extension on Apple Silicon. Thanks, Chris Amico. (`#536 `__)
- New ``--raw-lines`` option for the ``sqlite-utils query`` and ``sqlite-utils memory`` commands, which outputs just the raw value of the first column of every row. (:issue:`539`)
- Fixed a bug where ``table.upsert_all()`` failed if the ``not_null=`` option was passed. (:issue:`538`)
- Fixed a ``ResourceWarning`` when using ``sqlite-utils insert``. (:issue:`534`)
- Now shows a more detailed error message when ``sqlite-utils insert`` is called with invalid JSON. (:issue:`532`)
- ``table.convert(..., skip_false=False)`` and ``sqlite-utils convert --no-skip-false`` options, for avoiding a misfeature where the :ref:`convert() ` mechanism skips rows in the database with a falsey value for the specified column. Fixing this by default would be a backwards-incompatible change and is under consideration for a 4.0 release in the future. (:issue:`527`)
- Tables can now be created with self-referential foreign keys. Thanks, Scott Perry. (`#537 `__)
- ``sqlite-utils transform`` no longer breaks if a table defines default values for columns. Thanks, Kenny Song. (:issue:`509`)
- Fixed a bug where repeated calls to ``table.transform()`` did not work correctly. Thanks, Martin Carpenter. (:issue:`525`)
- Improved error message if ``rows_from_file()`` is passed a non-binary-mode file-like object. (:issue:`520`)

.. _v3_30:

3.30 (2022-10-25)
-----------------

- Now tested against Python 3.11. (:issue:`502`)
- New ``table.search_sql(include_rank=True)`` option, which adds a ``rank`` column to the generated SQL. Thanks, Jacob Chapman. (`#480 `__)
- Progress bars now display for newline-delimited JSON files using the ``--nl`` option. Thanks, Mischa Untaga. (:issue:`485`)
- New ``db.close()`` method. (:issue:`504`)
- Conversion functions passed to :ref:`table.convert(...) ` can now return lists or dictionaries, which will be inserted into the database as JSON strings. (:issue:`495`)
- ``sqlite-utils install`` and ``sqlite-utils uninstall`` commands for installing packages into the same virtual environment as ``sqlite-utils``, :ref:`described here `. (:issue:`483`)
- New :ref:`sqlite_utils.utils.flatten() ` utility function. (:issue:`500`)
- Documentation on :ref:`using Just ` to run tests, linters and build documentation.
- Documentation now covers the :ref:`release_process` for this package.

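A rough sketch of the ``table.convert(...)`` change noted above, where a conversion function returning a dictionary has its result stored as a JSON string - the ``places`` table is a made-up example:

.. code-block:: python

    import sqlite_utils

    db = sqlite_utils.Database(memory=True)
    db["places"].insert({"id": 1, "location": "38.89,-77.03"}, pk="id")
    db["places"].convert(
        "location",
        lambda value: {
            "latitude": float(value.split(",")[0]),
            "longitude": float(value.split(",")[1]),
        },
    )
    # The column now holds a JSON string such as {"latitude": 38.89, "longitude": -77.03}
    print(db["places"].get(1)["location"])
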
.. _v3_29:

3.29 (2022-08-27)
-----------------

- The ``sqlite-utils query``, ``memory`` and ``bulk`` commands now all accept a new ``--functions`` option. This can be passed a string of Python code, and any callable objects defined in that code will be made available to SQL queries as custom SQL functions. See :ref:`cli_query_functions` for details. (:issue:`471`)
- ``db[table].create(...)`` method now accepts a new ``transform=True`` parameter. If the table already exists it will be :ref:`transformed ` to match the schema configuration options passed to the function. This may result in columns being added or dropped, column types being changed, column order being updated or not null and default values for columns being set. (:issue:`467`)
- Related to the above, the ``sqlite-utils create-table`` command now accepts a ``--transform`` option.
- New introspection property: ``table.default_values`` returns a dictionary mapping each column name with a default value to the configured default value. (:issue:`475`)
- The ``--load-extension`` option can now be provided a path to a compiled SQLite extension module accompanied by the name of an entrypoint, separated by a colon - for example ``--load-extension ./lines0:sqlite3_lines0_noread_init``. This feature is modelled on code first `contributed to Datasette `__ by Alex Garcia. (:issue:`470`)
- Functions registered using the :ref:`db.register_function() ` method can now have a custom name specified using the new ``db.register_function(fn, name=...)`` parameter. (:issue:`458`)
- :ref:`sqlite-utils rows ` has a new ``--order`` option for specifying the sort order for the returned rows. (:issue:`469`)
- All of the CLI options that accept Python code blocks can now all be used to define functions that can access modules imported in that same block of code without needing to use the ``global`` keyword. (:issue:`472`)
- Fixed bug where ``table.extract()`` would not behave correctly for columns containing null values. Thanks, Forest Gregg. (:issue:`423`)
- New tutorial: `Cleaning data with sqlite-utils and Datasette `__ shows how to use ``sqlite-utils`` to import and clean an example CSV file.
- Datasette and ``sqlite-utils`` now have a Discord community. `Join the Discord here `__.

.. _v3_28:

3.28 (2022-07-15)
-----------------

- New :ref:`table.duplicate(new_name) ` method for creating a copy of a table with a matching schema and row contents. Thanks, `David `__. (:issue:`449`)
- New ``sqlite-utils duplicate data.db table_name new_name`` CLI command for :ref:`cli_duplicate_table`. (:issue:`454`)
- ``sqlite_utils.utils.rows_from_file()`` is now a :ref:`documented API `. It can be used to read a sequence of dictionaries from a file-like object containing CSV, TSV, JSON or newline-delimited JSON. It can be passed an explicit format or can attempt to detect the format automatically. (:issue:`443`)
- ``sqlite_utils.utils.TypeTracker`` is now a documented API for detecting the likely column types for a sequence of string rows, see :ref:`python_api_typetracker`. (:issue:`445`)
- ``sqlite_utils.utils.chunks()`` is now a documented API for :ref:`splitting an iterator into chunks `. (:issue:`451`)
- ``sqlite-utils enable-fts`` now has a ``--replace`` option for replacing the existing FTS configuration for a table. (:issue:`450`)
- The ``create-index``, ``add-column`` and ``duplicate`` commands all now take a ``--ignore`` option for ignoring errors should the database not be in the right state for them to operate. (:issue:`450`)

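A short sketch of the newly documented ``rows_from_file()`` API - note that values read from CSV arrive as strings unless you post-process them:

.. code-block:: python

    import io

    from sqlite_utils.utils import rows_from_file

    rows, format_used = rows_from_file(io.BytesIO(b"id,name\n1,Cleo"))
    # Expected output is roughly: [{'id': '1', 'name': 'Cleo'}] Format.CSV
    print(list(rows), format_used)
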
.. _v3_27:

3.27 (2022-06-14)
-----------------

See also `the annotated release notes `__ for this release.

- Documentation now uses the `Furo `__ Sphinx theme. (:issue:`435`)
- Code examples in documentation now have a "copy to clipboard" button. (:issue:`436`)
- ``sqlite_utils.utils.rows_from_file()`` is now a documented API, see :ref:`python_api_rows_from_file`. (:issue:`443`)
- ``rows_from_file()`` has two new parameters to help handle CSV files with rows that contain more values than are listed in that CSV file's headings: ``ignore_extras=True`` and ``extras_key="name-of-key"``. (:issue:`440`)
- ``sqlite_utils.utils.maximize_csv_field_size_limit()`` helper function for increasing the field size limit for reading CSV files to its maximum, see :ref:`python_api_maximize_csv_field_size_limit`. (:issue:`442`)
- ``table.search(where=, where_args=)`` parameters for adding additional ``WHERE`` clauses to a search query. The ``where=`` parameter is available on ``table.search_sql(...)`` as well. See :ref:`python_api_fts_search`. (:issue:`441`)
- Fixed bug where ``table.detect_fts()`` and other search-related functions could fail if two FTS-enabled tables had names that were prefixes of each other. (:issue:`434`)

.. _v3_26_1:

3.26.1 (2022-05-02)
-------------------

- Now depends on `click-default-group-wheel `__, a pure Python wheel package. This means you can install and use this package with `Pyodide `__, which can run Python entirely in your browser using WebAssembly. (`#429 `__)

  Try that out using the `Pyodide REPL `__:

  .. code-block:: python

      >>> import micropip
      >>> await micropip.install("sqlite-utils")
      >>> import sqlite_utils
      >>> db = sqlite_utils.Database(memory=True)
      >>> list(db.query("select 3 * 5"))
      [{'3 * 5': 15}]

.. _v3_26:

3.26 (2022-04-13)
-----------------

- New ``errors=r.IGNORE/r.SET_NULL`` parameter for the ``r.parsedatetime()`` and ``r.parsedate()`` :ref:`convert recipes `. (:issue:`416`)
- Fixed a bug where ``--multi`` could not be used in combination with ``--dry-run`` for the :ref:`convert ` command. (:issue:`415`)
- New documentation: :ref:`cli_convert_complex`. (:issue:`420`)
- More robust detection for whether or not ``deterministic=True`` is supported. (:issue:`425`)

.. _v3_25_1:

3.25.1 (2022-03-11)
-------------------

- Improved display of type information and parameters in the :ref:`API reference documentation `. (:issue:`413`)

.. _v3_25:

3.25 (2022-03-01)
-----------------

- New ``hash_id_columns=`` parameter for creating a primary key that's a hash of the content of specific columns - see :ref:`python_api_hash` for details. (:issue:`343`)
- New :ref:`db.sqlite_version ` property, returning a tuple of integers representing the version of SQLite, for example ``(3, 38, 0)``.
- Fixed a bug where :ref:`register_function(deterministic=True) ` caused errors on versions of SQLite prior to 3.8.3. (:issue:`408`)
- New documented :ref:`hash_record(record, keys=...) ` function.

.. _v3_24:

3.24 (2022-02-15)
-----------------

- SpatiaLite helpers for the ``sqlite-utils`` command-line tool - thanks, Chris Amico. (:issue:`398`)
- :ref:`sqlite-utils create-database ` ``--init-spatialite`` option for initializing SpatiaLite on a newly created database.
- :ref:`sqlite-utils add-geometry-column ` command for adding geometry columns.
- :ref:`sqlite-utils create-spatial-index ` command for adding spatial indexes.
- ``db[table].create(..., if_not_exists=True)`` option for :ref:`creating a table ` only if it does not already exist. (:issue:`397`)
- ``Database(memory_name="my_shared_database")`` parameter for creating a :ref:`named in-memory database ` that can be shared between multiple connections. (:issue:`405`)
- Documentation now describes :ref:`how to add a primary key to a rowid table ` using ``sqlite-utils transform``. (:issue:`403`)

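A quick sketch of the ``memory_name=`` parameter mentioned above - two ``Database`` instances sharing the same named in-memory database:

.. code-block:: python

    import sqlite_utils

    db1 = sqlite_utils.Database(memory_name="shared_demo")
    db2 = sqlite_utils.Database(memory_name="shared_demo")
    db1["dogs"].insert({"name": "Cleo"})
    # The second connection sees rows inserted through the first one
    print(list(db2["dogs"].rows))
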
.. _v3_23:

3.23 (2022-02-03)
-----------------

This release introduces four new utility methods for working with `SpatiaLite `__. Thanks, Chris Amico. (`#385 `__)

- ``sqlite_utils.utils.find_spatialite()`` :ref:`finds the location of the SpatiaLite module ` on disk.
- ``db.init_spatialite()`` :ref:`initializes SpatiaLite ` for the given database.
- ``table.add_geometry_column(...)`` :ref:`adds a geometry column ` to an existing table.
- ``table.create_spatial_index(...)`` :ref:`creates a spatial index ` for a column.
- ``sqlite-utils batch`` now accepts a ``--batch-size`` option. (:issue:`392`)

.. _v3_22_1:

3.22.1 (2022-01-25)
-------------------

- All commands now include example usage in their ``--help`` - see :ref:`cli_reference`. (:issue:`384`)
- Python library documentation has a new :ref:`python_api_getting_started` section. (:issue:`387`)
- Documentation now uses `Plausible analytics `__. (:issue:`389`)

.. _v3_22:

3.22 (2022-01-11)
-----------------

- New :ref:`cli_reference` documentation page, listing the output of ``--help`` for every one of the CLI commands. (:issue:`383`)
- ``sqlite-utils rows`` now has ``--limit`` and ``--offset`` options for paginating through data. (:issue:`381`)
- ``sqlite-utils rows`` now has ``--where`` and ``-p`` options for filtering the table using a ``WHERE`` query, see :ref:`cli_rows`. (:issue:`382`)

.. _v3_21:

3.21 (2022-01-10)
-----------------

CLI and Python library improvements to help run `ANALYZE `__ after creating indexes or inserting rows, to gain better performance from the SQLite query planner when it runs against indexes. Three new CLI commands: ``create-database``, ``analyze`` and ``bulk``. More details and examples can be found in `the annotated release notes `__.

- New ``sqlite-utils create-database`` command for creating new empty database files. (:issue:`348`)
- New Python methods for running ``ANALYZE`` against a database, table or index: ``db.analyze()`` and ``table.analyze()``, see :ref:`python_api_analyze`. (:issue:`366`)
- New :ref:`sqlite-utils analyze command ` for running ``ANALYZE`` using the CLI. (:issue:`379`)
- The ``create-index``, ``insert`` and ``upsert`` commands now have a new ``--analyze`` option for running ``ANALYZE`` after the command has completed. (:issue:`379`)
- New :ref:`sqlite-utils bulk command ` which can import records in the same way as ``sqlite-utils insert`` (from JSON, CSV or TSV) and use them to bulk execute a parametrized SQL query. (:issue:`375`)
- The CLI tool can now also be run using ``python -m sqlite_utils``. (:issue:`368`)
- Using ``--fmt`` now implies ``--table``, so you don't need to pass both options. (:issue:`374`)
- The ``--convert`` function applied to rows can now modify the row in place. (:issue:`371`)
- The :ref:`insert-files command ` supports two new columns: ``stem`` and ``suffix``. (:issue:`372`)
- The ``--nl`` import option now ignores blank lines in the input. (:issue:`376`)
- Fixed bug where streaming input to the ``insert`` command with ``--batch-size 1`` would appear to only commit after several rows had been ingested, due to unnecessary input buffering. (:issue:`364`)

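A small sketch of the new ``ANALYZE`` helpers in the Python library - ``data.db`` and the ``dogs`` table are hypothetical:

.. code-block:: python

    import sqlite_utils

    db = sqlite_utils.Database("data.db")
    db["dogs"].insert({"id": 1, "name": "Cleo"}, pk="id")
    db["dogs"].create_index(["name"])
    # Run ANALYZE against one table, then against the whole database
    db["dogs"].analyze()
    db.analyze()
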
--convert`` allows a Python function to be provided that will be used to convert each row that is being inserted into the database. See :ref:`cli_insert_convert`, including details on special behavior when combined with ``--lines`` and ``--text``. (:issue:`356`) - ``sqlite-utils convert`` now accepts a code value of ``-`` to read code from standard input. (:issue:`353`) - ``sqlite-utils convert`` also now accepts code that defines a named ``convert(value)`` function, see :ref:`cli_convert`. - ``db.supports_strict`` property showing if the database connection supports `SQLite strict tables `__. - ``table.strict`` property (see :ref:`python_api_introspection_strict`) indicating if the table uses strict mode. (:issue:`344`) - Fixed bug where ``sqlite-utils upsert ... --detect-types`` ignored the ``--detect-types`` option. (:issue:`362`) .. _v3_19: 3.19 (2021-11-20) ----------------- - The :ref:`table.lookup() method ` now accepts keyword arguments that match those on the underlying ``table.insert()`` method: ``foreign_keys=``, ``column_order=``, ``not_null=``, ``defaults=``, ``extracts=``, ``conversions=`` and ``columns=``. You can also now pass ``pk=`` to specify a different column name to use for the primary key. (:issue:`342`) .. _v3_18: 3.18 (2021-11-14) ----------------- - The ``table.lookup()`` method now has an optional second argument which can be used to populate columns only the first time the record is created, see :ref:`python_api_lookup_tables`. (:issue:`339`) - ``sqlite-utils memory`` now has a ``--flatten`` option for :ref:`flattening nested JSON objects ` into separate columns, consistent with ``sqlite-utils insert``. (:issue:`332`) - ``table.create_index(..., find_unique_name=True)`` parameter, which finds an available name for the created index even if the default name has already been taken. This means that ``index-foreign-keys`` will work even if one of the indexes it tries to create clashes with an existing index name. (:issue:`335`) - Added ``py.typed`` to the module, so `mypy `__ should now correctly pick up the type annotations. Thanks, Andreas Longo. (:issue:`331`) - Now depends on ``python-dateutil`` instead of depending on ``dateutils``. Thanks, Denys Pavlov. (:issue:`324`) - ``table.create()`` (see :ref:`python_api_explicit_create`) now handles ``dict``, ``list`` and ``tuple`` types, mapping them to ``TEXT`` columns in SQLite so that they can be stored encoded as JSON. (:issue:`338`) - Inserted data with square braces in the column names (for example a CSV file containing a ``item[price]``) column now have the braces converted to underscores: ``item_price_``. Previously such columns would be rejected with an error. (:issue:`329`) - Now also tested against Python 3.10. (`#330 `__) .. _v3_17.1: 3.17.1 (2021-09-22) ------------------- - :ref:`sqlite-utils memory ` now works if files passed to it share the same file name. (:issue:`325`) - :ref:`sqlite-utils query ` now returns ``[]`` in JSON mode if no rows are returned. (:issue:`328`) .. _v3_17: 3.17 (2021-08-24) ----------------- - The :ref:`sqlite-utils memory ` command has a new ``--analyze`` option, which runs the equivalent of the :ref:`analyze-tables ` command directly against the in-memory database created from the incoming CSV or JSON data. (:issue:`320`) - :ref:`sqlite-utils insert-files ` now has the ability to insert file contents in to ``TEXT`` columns in addition to the default ``BLOB``. Pass the ``--text`` option or use ``content_text`` as a column specifier. (:issue:`319`) .. 
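As a quick illustration of the two CLI options above, the following sketch assumes a ``docs.db`` database plus some local CSV and Markdown files (all file, database and table names are illustrative). The first command analyzes the columns of the CSV data loaded into the in-memory database; the second stores the Markdown files in a ``pages`` table with their contents in a ``TEXT`` column::

    $ sqlite-utils memory data.csv --analyze
    $ sqlite-utils insert-files docs.db pages *.md --text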
_v3_16: 3.16 (2021-08-18) ----------------- - Type signatures added to more methods, including ``table.resolve_foreign_keys()``, ``db.create_table_sql()``, ``db.create_table()`` and ``table.create()``. (:issue:`314`) - New ``db.quote_fts(value)`` method, see :ref:`python_api_quote_fts` - thanks, Mark Neumann. (:issue:`246`) - ``table.search()`` now accepts an optional ``quote=True`` parameter. (:issue:`296`) - CLI command ``sqlite-utils search`` now accepts a ``--quote`` option. (:issue:`296`) - Fixed bug where ``--no-headers`` and ``--tsv`` options to :ref:`sqlite-utils insert ` could not be used together. (:issue:`295`) - Various small improvements to :ref:`reference` documentation. .. _v3_15.1: 3.15.1 (2021-08-10) ------------------- - Python library now includes type annotations on almost all of the methods, plus detailed docstrings describing each one. (:issue:`311`) - New :ref:`reference` documentation page, powered by those docstrings. - Fixed bug where ``.add_foreign_keys()`` failed to raise an error if called against a ``View``. (:issue:`313`) - Fixed bug where ``.delete_where()`` returned a ``[]`` instead of returning ``self`` if called against a non-existent table. (:issue:`315`) .. _v3_15: 3.15 (2021-08-09) ----------------- - ``sqlite-utils insert --flatten`` option for :ref:`flattening nested JSON objects ` to create tables with column names like ``topkey_nestedkey``. (:issue:`310`) - Fixed several spelling mistakes in the documentation, spotted `using codespell `__. - Errors that occur while using the ``sqlite-utils`` CLI tool now show the responsible SQL and query parameters, if possible. (:issue:`309`) .. _v3_14: 3.14 (2021-08-02) ----------------- This release introduces the new :ref:`sqlite-utils convert command ` (:issue:`251`) and corresponding :ref:`table.convert(...) ` Python method (:issue:`302`). These tools can be used to apply a Python conversion function to one or more columns of a table, either updating the column in place or using transformed data from that column to populate one or more other columns. This command-line example uses the Python standard library `textwrap module `__ to wrap the content of the ``content`` column in the ``articles`` table to 100 characters:: $ sqlite-utils convert content.db articles content \ '"\n".join(textwrap.wrap(value, 100))' \ --import=textwrap The same operation in Python code looks like this: .. code-block:: python import sqlite_utils, textwrap db = sqlite_utils.Database("content.db") db["articles"].convert("content", lambda v: "\n".join(textwrap.wrap(v, 100))) See the full documentation for the :ref:`sqlite-utils convert command ` and the :ref:`table.convert(...) ` Python method for more details. Also in this release: - The new ``table.count_where(...)`` method, for counting rows in a table that match a specific SQL ``WHERE`` clause. (:issue:`305`) - New ``--silent`` option for the :ref:`sqlite-utils insert-files command ` to hide the terminal progress bar, consistent with the ``--silent`` option for ``sqlite-utils convert``. (:issue:`301`) .. _v3_13: 3.13 (2021-07-24) ----------------- - ``sqlite-utils schema my.db table1 table2`` command now accepts optional table names. (:issue:`299`) - ``sqlite-utils memory --help`` now describes the ``--schema`` option. .. _v3_12: 3.12 (2021-06-25) ----------------- - New :ref:`db.query(sql, params) ` method, which executes a SQL query and returns the results as an iterator over Python dictionaries. (:issue:`290`) - This project now uses ``flake8`` and has started to use ``mypy``. 
(:issue:`291`) - New documentation on :ref:`contributing ` to this project. (:issue:`292`) .. _v3_11: 3.11 (2021-06-20) ----------------- - New ``sqlite-utils memory data.csv --schema`` option, for outputting the schema of the in-memory database generated from one or more files. See :ref:`cli_memory_schema_dump_save`. (:issue:`288`) - Added :ref:`installation instructions `. (:issue:`286`) .. _v3_10: 3.10 (2021-06-19) ----------------- This release introduces the ``sqlite-utils memory`` command, which can be used to load CSV or JSON data into a temporary in-memory database and run SQL queries (including joins across multiple files) directly against that data. Also new: ``sqlite-utils insert --detect-types``, ``sqlite-utils dump``, ``table.use_rowid`` plus some smaller fixes. sqlite-utils memory ~~~~~~~~~~~~~~~~~~~ This example of ``sqlite-utils memory`` retrieves information about the all of the repositories in the `Dogsheep `__ organization on GitHub using `this JSON API `__, sorts them by their number of stars and outputs a table of the top five (using ``-t``):: $ curl -s 'https://api.github.com/users/dogsheep/repos' \ | sqlite-utils memory - ' select full_name, forks_count, stargazers_count from stdin order by stargazers_count desc limit 5 ' -t full_name forks_count stargazers_count --------------------------------- ------------- ------------------ dogsheep/twitter-to-sqlite 12 225 dogsheep/github-to-sqlite 14 139 dogsheep/dogsheep-photos 5 116 dogsheep/dogsheep.github.io 7 90 dogsheep/healthkit-to-sqlite 4 85 The tool works against files on disk as well. This example joins data from two CSV files:: $ cat creatures.csv species_id,name 1,Cleo 2,Bants 2,Dori 2,Azi $ cat species.csv id,species_name 1,Dog 2,Chicken $ sqlite-utils memory species.csv creatures.csv ' select * from creatures join species on creatures.species_id = species.id ' [{"species_id": 1, "name": "Cleo", "id": 1, "species_name": "Dog"}, {"species_id": 2, "name": "Bants", "id": 2, "species_name": "Chicken"}, {"species_id": 2, "name": "Dori", "id": 2, "species_name": "Chicken"}, {"species_id": 2, "name": "Azi", "id": 2, "species_name": "Chicken"}] Here the ``species.csv`` file becomes the ``species`` table, the ``creatures.csv`` file becomes the ``creatures`` table and the output is JSON, the default output format. You can also use the ``--attach`` option to attach existing SQLite database files to the in-memory database, in order to join data from CSV or JSON directly against your existing tables. Full documentation of this new feature is available in :ref:`cli_memory`. (:issue:`272`) sqlite-utils insert \-\-detect-types ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The :ref:`sqlite-utils insert ` command can be used to insert data from JSON, CSV or TSV files into a SQLite database file. The new ``--detect-types`` option (shortcut ``-d``), when used in conjunction with a CSV or TSV import, will automatically detect if columns in the file are integers or floating point numbers as opposed to treating everything as a text column and create the new table with the corresponding schema. See :ref:`cli_insert_csv_tsv` for details. (:issue:`282`) Other changes ~~~~~~~~~~~~~ - **Bug fix**: ``table.transform()``, when run against a table without explicit primary keys, would incorrectly create a new version of the table with an explicit primary key column called ``rowid``. (:issue:`284`) - New ``table.use_rowid`` introspection property, see :ref:`python_api_introspection_use_rowid`. 
(:issue:`285`) - The new ``sqlite-utils dump file.db`` command outputs a SQL dump that can be used to recreate a database. (:issue:`274`) - ``-h`` now works as a shortcut for ``--help``, thanks Loren McIntyre. (:issue:`276`) - Now using `pytest-cov `__ and `Codecov `__ to track test coverage - currently at 96%. (:issue:`275`) - SQL errors that occur when using ``sqlite-utils query`` are now displayed as CLI errors. .. _v3_9_1: 3.9.1 (2021-06-12) ------------------ - Fixed bug when using ``table.upsert_all()`` to create a table with only a single column that is treated as the primary key. (:issue:`271`) .. _v3_9: 3.9 (2021-06-11) ---------------- - New ``sqlite-utils schema`` command showing the full SQL schema for a database, see :ref:`Showing the schema (CLI)`. (:issue:`268`) - ``db.schema`` introspection property exposing the same feature to the Python library, see :ref:`Showing the schema (Python library) `. .. _v3_8: 3.8 (2021-06-02) ---------------- - New ``sqlite-utils indexes`` command to list indexes in a database, see :ref:`cli_indexes`. (:issue:`263`) - ``table.xindexes`` introspection property returning more details about that table's indexes, see :ref:`python_api_introspection_xindexes`. (:issue:`261`) .. _v3_7: 3.7 (2021-05-28) ---------------- - New ``table.pks_and_rows_where()`` method returning ``(primary_key, row_dictionary)`` tuples - see :ref:`python_api_pks_and_rows_where`. (:issue:`240`) - Fixed bug with ``table.add_foreign_key()`` against columns containing spaces. (:issue:`238`) - ``table_or_view.drop(ignore=True)`` option for avoiding errors if the table or view does not exist. (:issue:`237`) - ``sqlite-utils drop-view --ignore`` and ``sqlite-utils drop-table --ignore`` options. (:issue:`237`) - Fixed a bug with inserts of nested JSON containing non-ascii strings - thanks, Dylan Wu. (:issue:`257`) - Suggest ``--alter`` if an error occurs caused by a missing column. (:issue:`259`) - Support creating indexes with columns in descending order, see :ref:`API documentation ` and :ref:`CLI documentation `. (:issue:`260`) - Correctly handle CSV files that start with a UTF-8 BOM. (:issue:`250`) .. _v3_6: 3.6 (2021-02-18) ---------------- This release adds the ability to execute queries joining data from more than one database file - similar to the cross database querying feature introduced in `Datasette 0.55 `__. - The ``db.attach(alias, filepath)`` Python method can be used to attach extra databases to the same connection, see :ref:`db.attach() in the Python API documentation `. (:issue:`113`) - The ``--attach`` option attaches extra aliased databases to run SQL queries against directly on the command-line, see :ref:`attaching additional databases in the CLI documentation `. (:issue:`236`) .. _v3_5: 3.5 (2021-02-14) ---------------- - ``sqlite-utils insert --sniff`` option for detecting the delimiter and quote character used by a CSV file, see :ref:`cli_insert_csv_tsv_delimiter`. (:issue:`230`) - The ``table.rows_where()``, ``table.search()`` and ``table.search_sql()`` methods all now take optional ``offset=`` and ``limit=`` arguments. (:issue:`231`) - New ``--no-headers`` option for ``sqlite-utils insert --csv`` to handle CSV files that are missing the header row, see :ref:`cli_insert_csv_tsv_no_header`. (:issue:`228`) - Fixed bug where inserting data with extra columns in subsequent chunks would throw an error. Thanks `@nieuwenhoven `__ for the fix. (:issue:`234`) - Fixed bug importing CSV files with columns containing more than 128KB of data. 
(:issue:`229`) - Test suite now runs in CI against Ubuntu, macOS and Windows. Thanks `@nieuwenhoven `__ for the Windows test fixes. (:issue:`232`) .. _v3_4_1: 3.4.1 (2021-02-05) ------------------ - Fixed a code import bug that slipped in to 3.4. (:issue:`226`) .. _v3_4: 3.4 (2021-02-05) ---------------- - ``sqlite-utils insert --csv`` now accepts optional ``--delimiter`` and ``--quotechar`` options. See :ref:`cli_insert_csv_tsv_delimiter`. (:issue:`223`) .. _v3_3: 3.3 (2021-01-17) ---------------- - The ``table.m2m()`` method now accepts an optional ``alter=True`` argument to specify that any missing columns should be added to the referenced table. See :ref:`python_api_m2m`. (:issue:`222`) .. _v3_2_1: 3.2.1 (2021-01-12) ------------------ - Fixed a bug where ``.add_missing_columns()`` failed to take case insensitive column names into account. (:issue:`221`) .. _v3_2: 3.2 (2021-01-03) ---------------- This release introduces a new mechanism for speeding up ``count(*)`` queries using cached table counts, stored in a ``_counts`` table and updated by triggers. This mechanism is described in :ref:`python_api_cached_table_counts`, and can be enabled using Python API methods or the new ``enable-counts`` CLI command. (:issue:`212`) - ``table.enable_counts()`` method for enabling these triggers on a specific table. - ``db.enable_counts()`` method for enabling triggers on every table in the database. (:issue:`213`) - New ``sqlite-utils enable-counts my.db`` command for enabling counts on all or specific tables, see :ref:`cli_enable_counts`. (:issue:`214`) - New ``sqlite-utils triggers`` command for listing the triggers defined for a database or specific tables, see :ref:`cli_triggers`. (:issue:`218`) - New ``db.use_counts_table`` property which, if ``True``, causes ``table.count`` to read from the ``_counts`` table. (:issue:`215`) - ``table.has_counts_triggers`` property revealing if a table has been configured with the new ``_counts`` database triggers. - ``db.reset_counts()`` method and ``sqlite-utils reset-counts`` command for resetting the values in the ``_counts`` table. (:issue:`219`) - The previously undocumented ``db.escape()`` method has been renamed to ``db.quote()`` and is now covered by the documentation: :ref:`python_api_quote`. (:issue:`217`) - New ``table.triggers_dict`` and ``db.triggers_dict`` introspection properties. (:issue:`211`, :issue:`216`) - ``sqlite-utils insert`` now shows a more useful error message for invalid JSON. (:issue:`206`) .. _v3_1_1: 3.1.1 (2021-01-01) ------------------ - Fixed failing test caused by ``optimize`` sometimes creating larger database files. (:issue:`209`) - Documentation now lives on https://sqlite-utils.datasette.io/ - README now includes ``brew install sqlite-utils`` installation method. .. _v3_1: 3.1 (2020-12-12) ---------------- - New command: ``sqlite-utils analyze-tables my.db`` outputs useful information about the table columns in the database, such as the number of distinct values and how many rows are null. See :ref:`cli_analyze_tables` for documentation. (:issue:`207`) - New ``table.analyze_column(column)`` Python method used by the ``analyze-tables`` command - see :ref:`python_api_analyze_column`. - The ``table.update()`` method now correctly handles values that should be stored as JSON. Thanks, Andreas Madsack. (`#204 `__) .. _v3_0: 3.0 (2020-11-08) ---------------- This release introduces a new ``sqlite-utils search`` command for searching tables, see :ref:`cli_search`. 
(:issue:`192`) The ``table.search()`` method has been redesigned, see :ref:`python_api_fts_search`. (:issue:`197`) The release includes minor backwards-incompatible changes, hence the version bump to 3.0. Those changes, which should not affect most users, are: - The ``-c`` shortcut option for outputting CSV is no longer available. The full ``--csv`` option is required instead. - The ``-f`` shortcut for ``--fmt`` has also been removed - use ``--fmt``. - The ``table.search()`` method now defaults to sorting by relevance, not sorting by ``rowid``. (:issue:`198`) - The ``table.search()`` method now returns a generator over a list of Python dictionaries. It previously returned a list of tuples. Also in this release: - The ``query``, ``tables``, ``rows`` and ``search`` CLI commands now accept a new ``--tsv`` option which outputs the results in TSV. (:issue:`193`) - A new ``table.virtual_table_using`` property reveals if a table is a virtual table, and returns the upper case type of virtual table (e.g. ``FTS4`` or ``FTS5``) if it is. It returns ``None`` if the table is not a virtual table. (:issue:`196`) - The new ``table.search_sql()`` method returns the SQL for searching a table, see :ref:`python_api_fts_search_sql`. - ``sqlite-utils rows`` now accepts multiple optional ``-c`` parameters specifying the columns to return. (:issue:`200`) Changes since the 3.0a0 alpha release: - The ``sqlite-utils search`` command now defaults to returning every result, unless you add a ``--limit 20`` option. - The ``sqlite-utils search -c`` and ``table.search(columns=[])`` options are now fully respected. (:issue:`201`) .. _v2_23: 2.23 (2020-10-28) ----------------- - ``table.m2m(other_table, records)`` method now takes any iterable, not just a list or tuple. Thanks, Adam Wolf. (`#189 `__) - ``sqlite-utils insert`` now displays a progress bar for CSV or TSV imports. (:issue:`173`) - New ``@db.register_function(deterministic=True)`` option for registering deterministic SQLite functions in Python 3.8 or higher. (:issue:`191`) .. _v2_22: 2.22 (2020-10-16) ----------------- - New ``--encoding`` option for processing CSV and TSV files that use a non-utf-8 encoding, for both the ``insert`` and ``update`` commands. (:issue:`182`) - The ``--load-extension`` option is now available to many more commands. (:issue:`137`) - ``--load-extension=spatialite`` can be used to load SpatiaLite from common installation locations, if it is available. (:issue:`136`) - Tests now also run against Python 3.9. (:issue:`184`) - Passing ``pk=["id"]`` now has the same effect as passing ``pk="id"``. (:issue:`181`) .. _v2_21: 2.21 (2020-09-24) ----------------- - ``table.extract()`` and ``sqlite-utils extract`` now apply much, much faster - one example operation reduced from twelve minutes to just four seconds! (:issue:`172`) - ``sqlite-utils extract`` no longer shows a progress bar, because it's fast enough not to need one. - New ``column_order=`` option for ``table.transform()`` which can be used to alter the order of columns in a table. (:issue:`175`) - ``sqlite-utils transform --column-order=`` option (with a ``-o`` shortcut) for changing column order. (:issue:`176`) - The ``table.transform(drop_foreign_keys=)`` parameter and the ``sqlite-utils transform --drop-foreign-key`` option have changed. They now accept just the name of the column rather than requiring all three of the column, other table and other column. 
This is technically a backwards-incompatible change but I chose not to bump the major version number because the transform feature is so new. (:issue:`177`) - The table ``.disable_fts()``, ``.rebuild_fts()``, ``.delete()``, ``.delete_where()`` and ``.add_missing_columns()`` methods all now ``return self``, which means they can be chained together with other table operations. .. _v2_20: 2.20 (2020-09-22) ----------------- This release introduces two key new capabilities: **transform** (:issue:`114`) and **extract** (:issue:`42`). Transform ~~~~~~~~~ SQLite's ALTER TABLE has `several documented limitations `__. The ``table.transform()`` Python method and ``sqlite-utils transform`` CLI command work around these limitations using a pattern where a new table with the desired structure is created, data is copied over to it and the old table is then dropped and replaced by the new one. You can use these tools to change column types, rename columns, drop columns, add and remove ``NOT NULL`` and defaults, remove foreign key constraints and more. See the :ref:`transforming tables (CLI) ` and :ref:`transforming tables (Python library) ` documentation for full details of how to use them. Extract ~~~~~~~ Sometimes a database table - especially one imported from a CSV file - will contain duplicate data. A ``Trees`` table may include a ``Species`` column with only a few dozen unique values, when the table itself contains thousands of rows. The ``table.extract()`` method and ``sqlite-utils extract`` commands can extract a column - or multiple columns - out into a separate lookup table, and set up a foreign key relationship from the original table. The Python library :ref:`extract() documentation ` describes how extraction works in detail, and :ref:`cli_extract` in the CLI documentation includes a detailed example. Other changes ~~~~~~~~~~~~~ - The ``@db.register_function`` decorator can be used to quickly register Python functions as custom SQL functions, see :ref:`python_api_register_function`. (:issue:`162`) - The ``table.rows_where()`` method now accepts an optional ``select=`` argument for specifying which columns should be selected, see :ref:`python_api_rows`. .. _v2_19: 2.19 (2020-09-20) ----------------- - New ``sqlite-utils add-foreign-keys`` command for :ref:`cli_add_foreign_keys`. (:issue:`157`) - New ``table.enable_fts(..., replace=True)`` argument for replacing an existing FTS table with a new configuration. (:issue:`160`) - New ``table.add_foreign_key(..., ignore=True)`` argument for ignoring a foreign key if it already exists. (:issue:`112`) .. _v2_18: 2.18 (2020-09-08) ----------------- - ``table.rebuild_fts()`` method for rebuilding a FTS index, see :ref:`python_api_fts_rebuild`. (:issue:`155`) - ``sqlite-utils rebuild-fts data.db`` command for rebuilding FTS indexes across all tables, or just specific tables. (:issue:`155`) - ``table.optimize()`` method no longer deletes junk rows from the ``*_fts_docsize`` table. This was added in 2.17 but it turns out running ``table.rebuild_fts()`` is a better solution to this problem. - Fixed a bug where rows with additional columns that are inserted after the first batch of records could cause an error due to breaking SQLite's maximum number of parameters. Thanks, Simon Wiles. (:issue:`145`) .. _v2_17: 2.17 (2020-09-07) ----------------- This release handles a bug where replacing rows in FTS tables could result in growing numbers of unnecessary rows in the associated ``*_fts_docsize`` table. 
(:issue:`149`) - ``PRAGMA recursive_triggers=on`` by default for all connections. You can turn it off with ``Database(recursive_triggers=False)``. (:issue:`152`) - ``table.optimize()`` method now deletes unnecessary rows from the ``*_fts_docsize`` table. (:issue:`153`) - New tracer method for tracking underlying SQL queries, see :ref:`python_api_tracing`. (:issue:`150`) - Neater indentation for schema SQL. (:issue:`148`) - Documentation for ``sqlite_utils.AlterError`` exception thrown by in ``add_foreign_keys()``. .. _v2_16_1: 2.16.1 (2020-08-28) ------------------- - ``insert_all(..., alter=True)`` now works for columns introduced after the first 100 records. Thanks, Simon Wiles! (:issue:`139`) - Continuous Integration is now powered by GitHub Actions. (:issue:`143`) .. _v2_16: 2.16 (2020-08-21) ----------------- - ``--load-extension`` option for ``sqlite-utils query`` for loading SQLite extensions. (:issue:`134`) - New ``sqlite_utils.utils.find_spatialite()`` function for finding SpatiaLite in common locations. (:issue:`135`) .. _v2_15_1: 2.15.1 (2020-08-12) ------------------- - Now available as a ``sdist`` package on PyPI in addition to a wheel. (:issue:`133`) .. _v2_15: 2.15 (2020-08-10) ----------------- - New ``db.enable_wal()`` and ``db.disable_wal()`` methods for enabling and disabling `Write-Ahead Logging `__ for a database file - see :ref:`python_api_wal` in the Python API documentation. - Also ``sqlite-utils enable-wal file.db`` and ``sqlite-utils disable-wal file.db`` commands for doing the same thing on the command-line, see :ref:`WAL mode (CLI) `. (:issue:`132`) .. _v2_14_1: 2.14.1 (2020-08-05) ------------------- - Documentation improvements. .. _v2_14: 2.14 (2020-08-01) ----------------- - The :ref:`insert-files command ` can now read from standard input: ``cat dog.jpg | sqlite-utils insert-files dogs.db pics - --name=dog.jpg``. (:issue:`127`) - You can now specify a full-text search tokenizer using the new ``tokenize=`` parameter to :ref:`enable_fts() `. This means you can enable Porter stemming on a table by running ``db["articles"].enable_fts(["headline", "body"], tokenize="porter")``. (:issue:`130`) - You can also set a custom tokenizer using the :ref:`sqlite-utils enable-fts ` CLI command, via the new ``--tokenize`` option. .. _v2_13: 2.13 (2020-07-29) ----------------- - ``memoryview`` and ``uuid.UUID`` objects are now supported. ``memoryview`` objects will be stored using ``BLOB`` and ``uuid.UUID`` objects will be stored using ``TEXT``. (:issue:`128`) .. _v2_12: 2.12 (2020-07-27) ----------------- The theme of this release is better tools for working with binary data. The new ``insert-files`` command can be used to insert binary files directly into a database table, and other commands have been improved with better support for BLOB columns. - ``sqlite-utils insert-files my.db gifs *.gif`` can now insert the contents of files into a specified table. The columns in the table can be customized to include different pieces of metadata derived from the files. See :ref:`cli_insert_files`. (:issue:`122`) - ``--raw`` option to ``sqlite-utils query`` - for outputting just a single raw column value - see :ref:`cli_query_raw`. (:issue:`123`) - JSON output now encodes BLOB values as special base64 objects - see :ref:`cli_query_json`. (:issue:`125`) - The same format of JSON base64 objects can now be used to insert binary data - see :ref:`cli_inserting_data`. (:issue:`126`) - The ``sqlite-utils query`` command can now accept named parameters, e.g. 
``sqlite-utils :memory: "select :num * :num2" -p num 5 -p num2 6`` - see :ref:`cli_query_json`. (:issue:`124`) .. _v2_11: 2.11 (2020-07-08) ----------------- - New ``--truncate`` option to ``sqlite-utils insert``, and ``truncate=True`` argument to ``.insert_all()``. Thanks, Thomas Sibley. (`#118 `__) - The ``sqlite-utils query`` command now runs updates in a transaction. Thanks, Thomas Sibley. (`#120 `__) .. _v2_10_1: 2.10.1 (2020-06-23) ------------------- - Added documentation for the ``table.pks`` introspection property. (:issue:`116`) .. _v2_10: 2.10 (2020-06-12) ----------------- - The ``sqlite-utils`` command now supports UPDATE/INSERT/DELETE in addition to SELECT. (:issue:`115`) .. _v2_9_1: 2.9.1 (2020-05-11) ------------------ - Added custom project links to the `PyPI listing `__. .. _v2_9: 2.9 (2020-05-10) ---------------- - New ``sqlite-utils drop-table`` command, see :ref:`cli_drop_table`. (:issue:`111`) - New ``sqlite-utils drop-view`` command, see :ref:`cli_drop_view`. - Python ``decimal.Decimal`` objects are now stored as ``FLOAT``. (:issue:`110`) .. _v2_8: 2.8 (2020-05-03) ---------------- - New ``sqlite-utils create-table`` command, see :ref:`cli_create_table`. (:issue:`27`) - New ``sqlite-utils create-view`` command, see :ref:`cli_create_view`. (:issue:`107`) .. _v2_7.2: 2.7.2 (2020-05-02) ------------------ - ``db.create_view(...)`` now has additional parameters ``ignore=True`` or ``replace=True``, see :ref:`python_api_create_view`. (:issue:`106`) .. _v2_7.1: 2.7.1 (2020-05-01) ------------------ - New ``sqlite-utils views my.db`` command for listing views in a database, see :ref:`cli_views`. (:issue:`105`) - ``sqlite-utils tables`` (and ``views``) has a new ``--schema`` option which outputs the table/view schema, see :ref:`cli_tables`. (:issue:`104`) - Nested structures containing invalid JSON values (e.g. Python bytestrings) are now serialized using ``repr()`` instead of throwing an error. (:issue:`102`) .. _v2_7: 2.7 (2020-04-17) ---------------- - New ``columns=`` argument for the ``.insert()``, ``.insert_all()``, ``.upsert()`` and ``.upsert_all()`` methods, for over-riding the auto-detected types for columns and specifying additional columns that should be added when the table is created. See :ref:`python_api_custom_columns`. (:issue:`100`) .. _v2_6: 2.6 (2020-04-15) ---------------- - New ``table.rows_where(..., order_by="age desc")`` argument, see :ref:`python_api_rows`. (:issue:`76`) .. _v2_5: 2.5 (2020-04-12) ---------------- - Panda's Timestamp is now stored as a SQLite TEXT column. Thanks, b0b5h4rp13! (:issue:`96`) - ``table.last_pk`` is now only available for inserts or upserts of a single record. (:issue:`98`) - New ``Database(filepath, recreate=True)`` parameter for deleting and recreating the database. (:issue:`97`) .. _v2_4_4: 2.4.4 (2020-03-23) ------------------ - Fixed bug where columns with only null values were not correctly created. (:issue:`95`) .. _v2_4_3: 2.4.3 (2020-03-23) ------------------ - Column type suggestion code is no longer confused by null values. (:issue:`94`) .. _v2_4_2: 2.4.2 (2020-03-14) ------------------ - ``table.column_dicts`` now works with all column types - previously it would throw errors on types other than ``TEXT``, ``BLOB``, ``INTEGER`` or ``FLOAT``. (:issue:`92`) - Documentation for ``NotFoundError`` thrown by ``table.get(pk)`` - see :ref:`python_api_get`. .. _v2_4_1: 2.4.1 (2020-03-01) ------------------ - ``table.enable_fts()`` now works with columns that contain spaces. (:issue:`90`) .. 
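As a minimal sketch of the fix above, assuming an existing ``articles`` table whose columns include one with a space in its name (the database, table and column names here are illustrative):

.. code-block:: python

    import sqlite_utils

    db = sqlite_utils.Database("content.db")
    # A column name containing a space is passed as an ordinary string
    db["articles"].enable_fts(["title", "article body"])
    print(list(db["articles"].search("sqlite")))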
_v2_4: 2.4 (2020-02-26) ---------------- - ``table.disable_fts()`` can now be used to remove FTS tables and triggers that were created using ``table.enable_fts(...)``. (:issue:`88`) - The ``sqlite-utils disable-fts`` command can be used to remove FTS tables and triggers from the command-line. (:issue:`88`) - Trying to create table columns with square braces ([ or ]) in the name now raises an error. (:issue:`86`) - Subclasses of ``dict``, ``list`` and ``tuple`` are now detected as needing a JSON column. (:issue:`87`) .. _v2_3_1: 2.3.1 (2020-02-10) ------------------ ``table.create_index()`` now works for columns that contain spaces. (:issue:`85`) .. _v2_3: 2.3 (2020-02-08) ---------------- ``table.exists()`` is now a method, not a property. This was not a documented part of the API before so I'm considering this a non-breaking change. (:issue:`83`) .. _v2_2_1: 2.2.1 (2020-02-06) ------------------ Fixed a bug where ``.upsert(..., hash_id="pk")`` threw an error (:issue:`84`). .. _v2_2: 2.2 (2020-02-01) ---------------- New feature: ``sqlite_utils.suggest_column_types([records])`` returns the suggested column types for a list of records. See :ref:`python_api_suggest_column_types`. (:issue:`81`). This replaces the undocumented ``table.detect_column_types()`` method. .. _v2_1: 2.1 (2020-01-30) ---------------- New feature: ``conversions={...}`` can be passed to the ``.insert()`` family of functions to specify SQL conversions that should be applied to values that are being inserted or updated. See :ref:`python_api_conversions` . (`#77 `__). .. _v2_0_1: 2.0.1 (2020-01-05) ------------------ The ``.upsert()`` and ``.upsert_all()`` methods now raise a ``sqlite_utils.db.PrimaryKeyRequired`` exception if you call them without specifying the primary key column using ``pk=`` (:issue:`73`). .. _v2: 2.0 (2019-12-29) ---------------- This release changes the behaviour of ``upsert``. It's a breaking change, hence ``2.0``. The ``upsert`` command-line utility and the ``.upsert()`` and ``.upsert_all()`` Python API methods have had their behaviour altered. They used to completely replace the affected records: now, they update the specified values on existing records but leave other columns unaffected. See :ref:`Upserting data using the Python API ` and :ref:`Upserting data using the CLI ` for full details. If you want the old behaviour - where records were completely replaced - you can use ``$ sqlite-utils insert ... --replace`` on the command-line and ``.insert(..., replace=True)`` and ``.insert_all(..., replace=True)`` in the Python API. See :ref:`Insert-replacing data using the Python API ` and :ref:`Insert-replacing data using the CLI ` for more. For full background on this change, see `issue #66 `__. .. _v1_12_1: 1.12.1 (2019-11-06) ------------------- - Fixed error thrown when ``.insert_all()`` and ``.upsert_all()`` were called with empty lists (:issue:`52`) .. _v1_12: 1.12 (2019-11-04) ----------------- Python library utilities for deleting records (:issue:`62`) - ``db["tablename"].delete(4)`` to delete by primary key, see :ref:`python_api_delete` - ``db["tablename"].delete_where("id > ?", [3])`` to delete by a where clause, see :ref:`python_api_delete_where` .. _v1_11: 1.11 (2019-09-02) ----------------- Option to create triggers to automatically keep FTS tables up-to-date with newly inserted, updated and deleted records. Thanks, Amjith Ramanujam! (`#57 `__) - ``sqlite-utils enable-fts ... 
--create-triggers`` - see :ref:`Configuring full-text search using the CLI ` - ``db["tablename"].enable_fts(..., create_triggers=True)`` - see :ref:`Configuring full-text search using the Python library ` - Support for introspecting triggers for a database or table - see :ref:`python_api_introspection` (:issue:`59`) .. _v1_10: 1.10 (2019-08-23) ----------------- Ability to introspect and run queries against views (:issue:`54`) - ``db.view_names()`` method and and ``db.views`` property - Separate ``View`` and ``Table`` classes, both subclassing new ``Queryable`` class - ``view.drop()`` method See :ref:`python_api_views`. .. _v1_9: 1.9 (2019-08-04) ---------------- - ``table.m2m(...)`` method for creating many-to-many relationships: :ref:`python_api_m2m` (:issue:`23`) .. _v1_8: 1.8 (2019-07-28) ---------------- - ``table.update(pk, values)`` method: :ref:`python_api_update` (:issue:`35`) .. _v1_7_1: 1.7.1 (2019-07-28) ------------------ - Fixed bug where inserting records with 11 columns in a batch of 100 triggered a "too many SQL variables" error (:issue:`50`) - Documentation and tests for ``table.drop()`` method: :ref:`python_api_drop` .. _v1_7: 1.7 (2019-07-24) ---------------- Support for lookup tables. - New ``table.lookup({...})`` utility method for building and querying lookup tables - see :ref:`python_api_lookup_tables` (:issue:`44`) - New ``extracts=`` table configuration option, see :ref:`python_api_extracts` (:issue:`46`) - Use `pysqlite3 `__ if it is available, otherwise use ``sqlite3`` from the standard library - Table options can now be passed to the new ``db.table(name, **options)`` factory function in addition to being passed to ``insert_all(records, **options)`` and friends - see :ref:`python_api_table_configuration` - In-memory databases can now be created using ``db = Database(memory=True)`` .. _v1_6: 1.6 (2019-07-18) ---------------- - ``sqlite-utils insert`` can now accept TSV data via the new ``--tsv`` option (:issue:`41`) .. _v1_5: 1.5 (2019-07-14) ---------------- - Support for compound primary keys (:issue:`36`) - Configure these using the CLI tool by passing ``--pk`` multiple times - In Python, pass a tuple of columns to the ``pk=(..., ...)`` argument: :ref:`python_api_compound_primary_keys` - New ``table.get()`` method for retrieving a record by its primary key: :ref:`python_api_get` (:issue:`39`) .. _v1_4_1: 1.4.1 (2019-07-14) ------------------ - Assorted minor documentation fixes: `changes since 1.4 `__ .. _v1_4: 1.4 (2019-06-30) ---------------- - Added ``sqlite-utils index-foreign-keys`` command (:ref:`docs `) and ``db.index_foreign_keys()`` method (:ref:`docs `) (:issue:`33`) .. _v1_3: 1.3 (2019-06-28) ---------------- - New mechanism for adding multiple foreign key constraints at once: :ref:`db.add_foreign_keys() documentation ` (:issue:`31`) .. _v1_2_2: 1.2.2 (2019-06-25) ------------------ - Fixed bug where ``datetime.time`` was not being handled correctly .. _v1_2_1: 1.2.1 (2019-06-20) ------------------ - Check the column exists before attempting to add a foreign key (:issue:`29`) .. _v1_2: 1.2 (2019-06-12) ---------------- - Improved foreign key definitions: you no longer need to specify the ``column``, ``other_table`` AND ``other_column`` to define a foreign key - if you omit the ``other_table`` or ``other_column`` the script will attempt to guess the correct values by introspecting the database. See :ref:`python_api_add_foreign_key` for details. 
(:issue:`25`) - Ability to set ``NOT NULL`` constraints and ``DEFAULT`` values when creating tables (:issue:`24`). Documentation: :ref:`Setting defaults and not null constraints (Python API) `, :ref:`Setting defaults and not null constraints (CLI) ` - Support for ``not_null_default=X`` / ``--not-null-default`` for setting a ``NOT NULL DEFAULT 'x'`` when adding a new column. Documentation: :ref:`Adding columns (Python API) `, :ref:`Adding columns (CLI) ` .. _v1_1: 1.1 (2019-05-28) ---------------- - Support for ``ignore=True`` / ``--ignore`` for ignoring inserted records if the primary key already exists (:issue:`21`) - documentation: :ref:`Inserting data (Python API) `, :ref:`Inserting data (CLI) ` - Ability to add a column that is a foreign key reference using ``fk=...`` / ``--fk`` (:issue:`16`) - documentation: :ref:`Adding columns (Python API) `, :ref:`Adding columns (CLI) ` .. _v1_0_1: 1.0.1 (2019-05-27) ------------------ - ``sqlite-utils rows data.db table --json-cols`` - fixed bug where ``--json-cols`` was not obeyed .. _v1_0: 1.0 (2019-05-24) ---------------- - Option to automatically add new columns if you attempt to insert or upsert data with extra fields: ``sqlite-utils insert ... --alter`` - see :ref:`Adding columns automatically with the sqlite-utils CLI ` ``db["tablename"].insert(record, alter=True)`` - see :ref:`Adding columns automatically using the Python API ` - New ``--json-cols`` option for outputting nested JSON, see :ref:`cli_json_values` .. _v0_14: 0.14 (2019-02-24) ----------------- - Ability to create unique indexes: ``db["mytable"].create_index(["name"], unique=True)`` - ``db["mytable"].create_index(["name"], if_not_exists=True)`` - ``$ sqlite-utils create-index mydb.db mytable col1 [col2...]``, see :ref:`cli_create_index` - ``table.add_column(name, type)`` method, see :ref:`python_api_add_column` - ``$ sqlite-utils add-column mydb.db mytable nameofcolumn``, see :ref:`cli_add_column` (CLI) - ``db["books"].add_foreign_key("author_id", "authors", "id")``, see :ref:`python_api_add_foreign_key` - ``$ sqlite-utils add-foreign-key books.db books author_id authors id``, see :ref:`cli_add_foreign_key` (CLI) - Improved (but backwards-incompatible) ``foreign_keys=`` argument to various methods, see :ref:`python_api_foreign_keys` .. _v0_13: 0.13 (2019-02-23) ----------------- - New ``--table`` and ``--fmt`` options can be used to output query results in a variety of visual table formats, see :ref:`cli_query_table` - New ``hash_id=`` argument can now be used for :ref:`python_api_hash` - Can now derive correct column types for numpy int, uint and float values - ``table.last_id`` has been renamed to ``table.last_rowid`` - ``table.last_pk`` now contains the last inserted primary key, if ``pk=`` was specified - Prettier indentation in the ``CREATE TABLE`` generated schemas .. _v0_12: 0.12 (2019-02-22) ----------------- - Added ``db[table].rows`` iterator - see :ref:`python_api_rows` - Replaced ``sqlite-utils json`` and ``sqlite-utils csv`` with a new default subcommand called ``sqlite-utils query`` which defaults to JSON and takes formatting options ``--nl``, ``--csv`` and ``--no-headers`` - see :ref:`cli_query_json` and :ref:`cli_query_csv` - New ``sqlite-utils rows data.db name-of-table`` command, see :ref:`cli_rows` - ``sqlite-utils table`` command now takes options ``--counts`` and ``--columns`` plus the standard output format options, see :ref:`cli_tables` .. 
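A short sketch of the ``db[table].rows`` iterator added in this release (the database and table names are illustrative):

.. code-block:: python

    import sqlite_utils

    db = sqlite_utils.Database("data.db")
    # Iterate over every row in the table as a dictionary
    for row in db["mytable"].rows:
        print(row)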
_v0_11: 0.11 (2019-02-07) ----------------- New commands for enabling FTS against a table and columns:: sqlite-utils enable-fts db.db mytable col1 col2 See :ref:`cli_fts`. .. _v0_10: 0.10 (2019-02-06) ----------------- Handle ``datetime.date`` and ``datetime.time`` values. New option for efficiently inserting rows from a CSV: :: sqlite-utils insert db.db foo - --csv .. _v0_9: 0.9 (2019-01-27) ---------------- Improved support for newline-delimited JSON. ``sqlite-utils insert`` has two new command-line options: * ``--nl`` means "expect newline-delimited JSON". This is an extremely efficient way of loading in large amounts of data, especially if you pipe it into standard input. * ``--batch-size=1000`` lets you increase the batch size (default is 100). A commit will be issued every X records. This also control how many initial records are considered when detecting the desired SQL table schema for the data. In the Python API, the ``table.insert_all(...)`` method can now accept a generator as well as a list of objects. This will be efficiently used to populate the table no matter how many records are produced by the generator. The ``Database()`` constructor can now accept a ``pathlib.Path`` object in addition to a string or an existing SQLite connection object. .. _v0_8: 0.8 (2019-01-25) ---------------- Two new commands: ``sqlite-utils csv`` and ``sqlite-utils json`` These commands execute a SQL query and return the results as CSV or JSON. See :ref:`cli_query_csv` and :ref:`cli_query_json` for more details. :: $ sqlite-utils json --help Usage: sqlite-utils json [OPTIONS] PATH SQL Execute SQL query and return the results as JSON Options: --nl Output newline-delimited JSON --arrays Output rows as arrays instead of objects --help Show this message and exit. $ sqlite-utils csv --help Usage: sqlite-utils csv [OPTIONS] PATH SQL Execute SQL query and return the results as CSV Options: --no-headers Exclude headers from CSV output --help Show this message and exit. .. _v0_7: 0.7 (2019-01-24) ---------------- This release implements the ``sqlite-utils`` command-line tool with a number of useful subcommands. - ``sqlite-utils tables demo.db`` lists the tables in the database - ``sqlite-utils tables demo.db --fts4`` shows just the FTS4 tables - ``sqlite-utils tables demo.db --fts5`` shows just the FTS5 tables - ``sqlite-utils vacuum demo.db`` runs VACUUM against the database - ``sqlite-utils optimize demo.db`` runs OPTIMIZE against all FTS tables, then VACUUM - ``sqlite-utils optimize demo.db --no-vacuum`` runs OPTIMIZE but skips VACUUM The two most useful subcommands are ``upsert`` and ``insert``, which allow you to ingest JSON files with one or more records in them, creating the corresponding table with the correct columns if it does not already exist. See :ref:`cli_inserting_data` for more details. 
- ``sqlite-utils insert demo.db dogs dogs.json --pk=id`` inserts new records from ``dogs.json`` into the ``dogs`` table - ``sqlite-utils upsert demo.db dogs dogs.json --pk=id`` upserts records, replacing any records with duplicate primary keys One backwards incompatible change: the ``db["table"].table_names`` property is now a method: - ``db["table"].table_names()`` returns a list of table names - ``db["table"].table_names(fts4=True)`` returns a list of just the FTS4 tables - ``db["table"].table_names(fts5=True)`` returns a list of just the FTS5 tables A few other changes: - Plenty of updated documentation, including full coverage of the new command-line tool - Allow column names to be reserved words (use correct SQL escaping) - Added automatic column support for bytes and datetime.datetime .. _v0_6: 0.6 (2018-08-12) ---------------- - ``.enable_fts()`` now takes optional argument ``fts_version``, defaults to ``FTS5``. Use ``FTS4`` if the version of SQLite bundled with your Python does not support FTS5 - New optional ``column_order=`` argument to ``.insert()`` and friends for providing a partial or full desired order of the columns when a database table is created - :ref:`New documentation ` for ``.insert_all()`` and ``.upsert()`` and ``.upsert_all()`` .. _v0_5: 0.5 (2018-08-05) ---------------- - ``db.tables`` and ``db.table_names`` introspection properties - ``db.indexes`` property for introspecting indexes - ``table.create_index(columns, index_name)`` method - ``db.create_view(name, sql)`` method - Table methods can now be chained, plus added ``table.last_id`` for accessing the last inserted row ID 0.4 (2018-07-31) ---------------- - ``enable_fts()``, ``populate_fts()`` and ``search()`` table methods 0.3.1 (2018-07-31) ------------------ - Documented related projects - Added badges to the documentation 0.3 (2018-07-31) ---------------- - New ``Table`` class representing a table in the SQLite database 0.2 (2018-07-28) ---------------- - Initial release to PyPI sqlite-utils-3.35.2/docs/cli-reference.rst000066400000000000000000001412661452131415600204440ustar00rootroot00000000000000.. _cli_reference: =============== CLI reference =============== This page lists the ``--help`` for every ``sqlite-utils`` CLI sub-command. .. contents:: :local: :class: this-will-duplicate-information-and-it-is-still-useful-here .. 
[[[cog from sqlite_utils import cli import sys sys._called_from_test = True from click.testing import CliRunner import textwrap commands = list(cli.cli.commands.keys()) go_first = [ "query", "memory", "insert", "upsert", "bulk", "search", "transform", "extract", "schema", "insert-files", "analyze-tables", "convert", "tables", "views", "rows", "triggers", "indexes", "create-database", "create-table", "create-index", "enable-fts", "populate-fts", "rebuild-fts", "disable-fts" ] refs = { "query": "cli_query", "memory": "cli_memory", "insert": [ "cli_inserting_data", "cli_insert_csv_tsv", "cli_insert_unstructured", "cli_insert_convert" ], "upsert": "cli_upsert", "tables": "cli_tables", "views": "cli_views", "optimize": "cli_optimize", "rows": "cli_rows", "triggers": "cli_triggers", "indexes": "cli_indexes", "enable-fts": "cli_fts", "analyze": "cli_analyze", "vacuum": "cli_vacuum", "dump": "cli_dump", "add-column": "cli_add_column", "rename-table": "cli_renaming_tables", "duplicate": "cli_duplicate_table", "add-foreign-key": "cli_add_foreign_key", "add-foreign-keys": "cli_add_foreign_keys", "index-foreign-keys": "cli_index_foreign_keys", "create-index": "cli_create_index", "enable-wal": "cli_wal", "enable-counts": "cli_enable_counts", "bulk": "cli_bulk", "create-database": "cli_create_database", "create-table": "cli_create_table", "drop-table": "cli_drop_table", "create-view": "cli_create_view", "drop-view": "cli_drop_view", "search": "cli_search", "transform": "cli_transform_table", "extract": "cli_extract", "schema": "cli_schema", "insert-files": "cli_insert_files", "analyze-tables": "cli_analyze_tables", "convert": "cli_convert", "add-geometry-column": "cli_spatialite", "create-spatial-index": "cli_spatialite_indexes", "install": "cli_install", "uninstall": "cli_uninstall", "tui": "cli_tui", } commands.sort(key = lambda command: go_first.index(command) if command in go_first else 999) cog.out("\n") for command in commands: cog.out(".. _cli_ref_" + command.replace("-", "_") + ":\n\n") cog.out(command + "\n") cog.out(("=" * len(command)) + "\n\n") if command in refs: command_refs = refs[command] if isinstance(command_refs, str): command_refs = [command_refs] cog.out( "See {}.\n\n".format( ", ".join(":ref:`{}`".format(c) for c in command_refs) ) ) cog.out("::\n\n") result = CliRunner().invoke(cli.cli, [command, "--help"]) output = result.output.replace("Usage: cli ", "Usage: sqlite-utils ") cog.out(textwrap.indent(output, ' ')) cog.out("\n\n") .. ]]] .. _cli_ref_query: query ===== See :ref:`cli_query`. :: Usage: sqlite-utils query [OPTIONS] PATH SQL Execute SQL query and return the results as JSON Example: sqlite-utils data.db \ "select * from chickens where age > :age" \ -p age 1 Options: --attach ... 
Additional databases to attach - specify alias and filepath --nl Output newline-delimited JSON --arrays Output rows as arrays instead of objects --csv Output CSV --tsv Output TSV --no-headers Omit CSV headers -t, --table Output as a formatted table --fmt TEXT Table format - one of asciidoc, double_grid, double_outline, fancy_grid, fancy_outline, github, grid, heavy_grid, heavy_outline, html, jira, latex, latex_booktabs, latex_longtable, latex_raw, mediawiki, mixed_grid, mixed_outline, moinmoin, orgtbl, outline, pipe, plain, presto, pretty, psql, rounded_grid, rounded_outline, rst, simple, simple_grid, simple_outline, textile, tsv, unsafehtml, youtrack --json-cols Detect JSON cols and output them as JSON, not escaped strings -r, --raw Raw output, first column of first row --raw-lines Raw output, first column of each row -p, --param ... Named :parameters for SQL query --functions TEXT Python code defining one or more custom SQL functions --load-extension TEXT Path to SQLite extension, with optional :entrypoint -h, --help Show this message and exit. .. _cli_ref_memory: memory ====== See :ref:`cli_memory`. :: Usage: sqlite-utils memory [OPTIONS] [PATHS]... SQL Execute SQL query against an in-memory database, optionally populated by imported data To import data from CSV, TSV or JSON files pass them on the command-line: sqlite-utils memory one.csv two.json \ "select * from one join two on one.two_id = two.id" For data piped into the tool from standard input, use "-" or "stdin": cat animals.csv | sqlite-utils memory - \ "select * from stdin where species = 'dog'" The format of the data will be automatically detected. You can specify the format explicitly using :json, :csv, :tsv or :nl (for newline-delimited JSON) - for example: cat animals.csv | sqlite-utils memory stdin:csv places.dat:nl \ "select * from stdin where place_id in (select id from places)" Use --schema to view the SQL schema of any imported files: sqlite-utils memory animals.csv --schema Options: --functions TEXT Python code defining one or more custom SQL functions --attach ... Additional databases to attach - specify alias and filepath --flatten Flatten nested JSON objects, so {"foo": {"bar": 1}} becomes {"foo_bar": 1} --nl Output newline-delimited JSON --arrays Output rows as arrays instead of objects --csv Output CSV --tsv Output TSV --no-headers Omit CSV headers -t, --table Output as a formatted table --fmt TEXT Table format - one of asciidoc, double_grid, double_outline, fancy_grid, fancy_outline, github, grid, heavy_grid, heavy_outline, html, jira, latex, latex_booktabs, latex_longtable, latex_raw, mediawiki, mixed_grid, mixed_outline, moinmoin, orgtbl, outline, pipe, plain, presto, pretty, psql, rounded_grid, rounded_outline, rst, simple, simple_grid, simple_outline, textile, tsv, unsafehtml, youtrack --json-cols Detect JSON cols and output them as JSON, not escaped strings -r, --raw Raw output, first column of first row --raw-lines Raw output, first column of each row -p, --param ... Named :parameters for SQL query --encoding TEXT Character encoding for CSV input, defaults to utf-8 -n, --no-detect-types Treat all CSV/TSV columns as TEXT --schema Show SQL schema for in-memory database --dump Dump SQL for in-memory database --save FILE Save in-memory database to this file --analyze Analyze resulting tables and output results --load-extension TEXT Path to SQLite extension, with optional :entrypoint -h, --help Show this message and exit. .. 
_cli_ref_insert: insert ====== See :ref:`cli_inserting_data`, :ref:`cli_insert_csv_tsv`, :ref:`cli_insert_unstructured`, :ref:`cli_insert_convert`. :: Usage: sqlite-utils insert [OPTIONS] PATH TABLE FILE Insert records from FILE into a table, creating the table if it does not already exist. Example: echo '{"name": "Lila"}' | sqlite-utils insert data.db chickens - By default the input is expected to be a JSON object or array of objects. - Use --nl for newline-delimited JSON objects - Use --csv or --tsv for comma-separated or tab-separated input - Use --lines to write each incoming line to a column called "line" - Use --text to write the entire input to a column called "text" You can also use --convert to pass a fragment of Python code that will be used to convert each input. Your Python code will be passed a "row" variable representing the imported row, and can return a modified row. This example uses just the name, latitude and longitude columns from a CSV file, converting name to upper case and latitude and longitude to floating point numbers: sqlite-utils insert plants.db plants plants.csv --csv --convert ' return { "name": row["name"].upper(), "latitude": float(row["latitude"]), "longitude": float(row["longitude"]), }' If you are using --lines your code will be passed a "line" variable, and for --text a "text" variable. When using --text your function can return an iterator of rows to insert. This example inserts one record per word in the input: echo 'A bunch of words' | sqlite-utils insert words.db words - \ --text --convert '({"word": w} for w in text.split())' Options: --pk TEXT Columns to use as the primary key, e.g. id --flatten Flatten nested JSON objects, so {"a": {"b": 1}} becomes {"a_b": 1} --nl Expect newline-delimited JSON -c, --csv Expect CSV input --tsv Expect TSV input --empty-null Treat empty strings as NULL --lines Treat each line as a single value called 'line' --text Treat input as a single value called 'text' --convert TEXT Python code to convert each item --import TEXT Python modules to import --delimiter TEXT Delimiter to use for CSV files --quotechar TEXT Quote character to use for CSV/TSV --sniff Detect delimiter and quote character --no-headers CSV file has no header row --encoding TEXT Character encoding for input, defaults to utf-8 --batch-size INTEGER Commit every X records --stop-after INTEGER Stop after X records --alter Alter existing table to add any missing columns --not-null TEXT Columns that should be created as NOT NULL --default ... Default value that should be set for a column -d, --detect-types Detect types for columns in CSV/TSV data --analyze Run ANALYZE at the end of this operation --load-extension TEXT Path to SQLite extension, with optional :entrypoint --silent Do not show progress bar --ignore Ignore records if pk already exists --replace Replace records if pk already exists --truncate Truncate table before inserting records, if table already exists -h, --help Show this message and exit. .. _cli_ref_upsert: upsert ====== See :ref:`cli_upsert`. :: Usage: sqlite-utils upsert [OPTIONS] PATH TABLE FILE Upsert records based on their primary key. Works like 'insert' but if an incoming record has a primary key that matches an existing record the existing record will be updated. Example: echo '[ {"id": 1, "name": "Lila"}, {"id": 2, "name": "Suna"} ]' | sqlite-utils upsert data.db chickens - --pk id Options: --pk TEXT Columns to use as the primary key, e.g. 
id [required] --flatten Flatten nested JSON objects, so {"a": {"b": 1}} becomes {"a_b": 1} --nl Expect newline-delimited JSON -c, --csv Expect CSV input --tsv Expect TSV input --empty-null Treat empty strings as NULL --lines Treat each line as a single value called 'line' --text Treat input as a single value called 'text' --convert TEXT Python code to convert each item --import TEXT Python modules to import --delimiter TEXT Delimiter to use for CSV files --quotechar TEXT Quote character to use for CSV/TSV --sniff Detect delimiter and quote character --no-headers CSV file has no header row --encoding TEXT Character encoding for input, defaults to utf-8 --batch-size INTEGER Commit every X records --stop-after INTEGER Stop after X records --alter Alter existing table to add any missing columns --not-null TEXT Columns that should be created as NOT NULL --default ... Default value that should be set for a column -d, --detect-types Detect types for columns in CSV/TSV data --analyze Run ANALYZE at the end of this operation --load-extension TEXT Path to SQLite extension, with optional :entrypoint --silent Do not show progress bar -h, --help Show this message and exit. .. _cli_ref_bulk: bulk ==== See :ref:`cli_bulk`. :: Usage: sqlite-utils bulk [OPTIONS] PATH SQL FILE Execute parameterized SQL against the provided list of documents. Example: echo '[ {"id": 1, "name": "Lila2"}, {"id": 2, "name": "Suna2"} ]' | sqlite-utils bulk data.db ' update chickens set name = :name where id = :id ' - Options: --batch-size INTEGER Commit every X records --functions TEXT Python code defining one or more custom SQL functions --flatten Flatten nested JSON objects, so {"a": {"b": 1}} becomes {"a_b": 1} --nl Expect newline-delimited JSON -c, --csv Expect CSV input --tsv Expect TSV input --empty-null Treat empty strings as NULL --lines Treat each line as a single value called 'line' --text Treat input as a single value called 'text' --convert TEXT Python code to convert each item --import TEXT Python modules to import --delimiter TEXT Delimiter to use for CSV files --quotechar TEXT Quote character to use for CSV/TSV --sniff Detect delimiter and quote character --no-headers CSV file has no header row --encoding TEXT Character encoding for input, defaults to utf-8 --load-extension TEXT Path to SQLite extension, with optional :entrypoint -h, --help Show this message and exit. .. _cli_ref_search: search ====== See :ref:`cli_search`. 
:: Usage: sqlite-utils search [OPTIONS] PATH DBTABLE Q Execute a full-text search against this table Example: sqlite-utils search data.db chickens lila Options: -o, --order TEXT Order by ('column' or 'column desc') -c, --column TEXT Columns to return --limit INTEGER Number of rows to return - defaults to everything --sql Show SQL query that would be run --quote Apply FTS quoting rules to search term --nl Output newline-delimited JSON --arrays Output rows as arrays instead of objects --csv Output CSV --tsv Output TSV --no-headers Omit CSV headers -t, --table Output as a formatted table --fmt TEXT Table format - one of asciidoc, double_grid, double_outline, fancy_grid, fancy_outline, github, grid, heavy_grid, heavy_outline, html, jira, latex, latex_booktabs, latex_longtable, latex_raw, mediawiki, mixed_grid, mixed_outline, moinmoin, orgtbl, outline, pipe, plain, presto, pretty, psql, rounded_grid, rounded_outline, rst, simple, simple_grid, simple_outline, textile, tsv, unsafehtml, youtrack --json-cols Detect JSON cols and output them as JSON, not escaped strings --load-extension TEXT Path to SQLite extension, with optional :entrypoint -h, --help Show this message and exit. .. _cli_ref_transform: transform ========= See :ref:`cli_transform_table`. :: Usage: sqlite-utils transform [OPTIONS] PATH TABLE Transform a table beyond the capabilities of ALTER TABLE Example: sqlite-utils transform mydb.db mytable \ --drop column1 \ --rename column2 column_renamed Options: --type ... Change column type to INTEGER, TEXT, FLOAT or BLOB --drop TEXT Drop this column --rename ... Rename this column to X -o, --column-order TEXT Reorder columns --not-null TEXT Set this column to NOT NULL --not-null-false TEXT Remove NOT NULL from this column --pk TEXT Make this column the primary key --pk-none Remove primary key (convert to rowid table) --default ... Set default value for this column --default-none TEXT Remove default from this column --add-foreign-key ... Add a foreign key constraint from a column to another table with another column --drop-foreign-key TEXT Drop foreign key constraint for this column --sql Output SQL without executing it --load-extension TEXT Path to SQLite extension, with optional :entrypoint -h, --help Show this message and exit. .. _cli_ref_extract: extract ======= See :ref:`cli_extract`. :: Usage: sqlite-utils extract [OPTIONS] PATH TABLE COLUMNS... Extract one or more columns into a separate table Example: sqlite-utils extract trees.db Street_Trees species Options: --table TEXT Name of the other table to extract columns to --fk-column TEXT Name of the foreign key column to add to the table --rename ... Rename this column in extracted table --load-extension TEXT Path to SQLite extension, with optional :entrypoint -h, --help Show this message and exit. .. _cli_ref_schema: schema ====== See :ref:`cli_schema`. :: Usage: sqlite-utils schema [OPTIONS] PATH [TABLES]... Show full schema for this database or for specified tables Example: sqlite-utils schema trees.db Options: --load-extension TEXT Path to SQLite extension, with optional :entrypoint -h, --help Show this message and exit. .. _cli_ref_insert_files: insert-files ============ See :ref:`cli_insert_files`. :: Usage: sqlite-utils insert-files [OPTIONS] PATH TABLE FILE_OR_DIR... 
Insert one or more files using BLOB columns in the specified table Example: sqlite-utils insert-files pics.db images *.gif \ -c name:name \ -c content:content \ -c content_hash:sha256 \ -c created:ctime_iso \ -c modified:mtime_iso \ -c size:size \ --pk name Options: -c, --column TEXT Column definitions for the table --pk TEXT Column to use as primary key --alter Alter table to add missing columns --replace Replace files with matching primary key --upsert Upsert files with matching primary key --name TEXT File name to use --text Store file content as TEXT, not BLOB --encoding TEXT Character encoding for input, defaults to utf-8 -s, --silent Don't show a progress bar --load-extension TEXT Path to SQLite extension, with optional :entrypoint -h, --help Show this message and exit. .. _cli_ref_analyze_tables: analyze-tables ============== See :ref:`cli_analyze_tables`. :: Usage: sqlite-utils analyze-tables [OPTIONS] PATH [TABLES]... Analyze the columns in one or more tables Example: sqlite-utils analyze-tables data.db trees Options: -c, --column TEXT Specific columns to analyze --save Save results to _analyze_tables table --common-limit INTEGER How many common values --no-most Skip most common values --no-least Skip least common values --load-extension TEXT Path to SQLite extension, with optional :entrypoint -h, --help Show this message and exit. .. _cli_ref_convert: convert ======= See :ref:`cli_convert`. :: Usage: sqlite-utils convert [OPTIONS] DB_PATH TABLE COLUMNS... CODE Convert columns using Python code you supply. For example: sqlite-utils convert my.db mytable mycolumn \ '"\n".join(textwrap.wrap(value, 10))' \ --import=textwrap "value" is a variable with the column value to be converted. Use "-" for CODE to read Python code from standard input. The following common operations are available as recipe functions: r.jsonsplit(value, delimiter=',', type=) Convert a string like a,b,c into a JSON array ["a", "b", "c"] r.parsedate(value, dayfirst=False, yearfirst=False, errors=None) Parse a date and convert it to ISO date format: yyyy-mm-dd  - dayfirst=True: treat xx as the day in xx/yy/zz - yearfirst=True: treat xx as the year in xx/yy/zz - errors=r.IGNORE to ignore values that cannot be parsed - errors=r.SET_NULL to set values that cannot be parsed to null r.parsedatetime(value, dayfirst=False, yearfirst=False, errors=None) Parse a datetime and convert it to ISO datetime format: yyyy-mm-ddTHH:MM:SS  - dayfirst=True: treat xx as the day in xx/yy/zz - yearfirst=True: treat xx as the year in xx/yy/zz - errors=r.IGNORE to ignore values that cannot be parsed - errors=r.SET_NULL to set values that cannot be parsed to null You can use these recipes like so: sqlite-utils convert my.db mytable mycolumn \ 'r.jsonsplit(value, delimiter=":")' Options: --import TEXT Python modules to import --dry-run Show results of running this against first 10 rows --multi Populate columns for keys in returned dictionary --where TEXT Optional where clause -p, --param ... Named :parameters for where clause --output TEXT Optional separate column to populate with the output --output-type [integer|float|blob|text] Column type to use for the output column --drop Drop original column afterwards --no-skip-false Don't skip falsey values -s, --silent Don't show a progress bar --pdb Open pdb debugger on first error -h, --help Show this message and exit. .. _cli_ref_tables: tables ====== See :ref:`cli_tables`. 
:: Usage: sqlite-utils tables [OPTIONS] PATH List the tables in the database Example: sqlite-utils tables trees.db Options: --fts4 Just show FTS4 enabled tables --fts5 Just show FTS5 enabled tables --counts Include row counts per table --nl Output newline-delimited JSON --arrays Output rows as arrays instead of objects --csv Output CSV --tsv Output TSV --no-headers Omit CSV headers -t, --table Output as a formatted table --fmt TEXT Table format - one of asciidoc, double_grid, double_outline, fancy_grid, fancy_outline, github, grid, heavy_grid, heavy_outline, html, jira, latex, latex_booktabs, latex_longtable, latex_raw, mediawiki, mixed_grid, mixed_outline, moinmoin, orgtbl, outline, pipe, plain, presto, pretty, psql, rounded_grid, rounded_outline, rst, simple, simple_grid, simple_outline, textile, tsv, unsafehtml, youtrack --json-cols Detect JSON cols and output them as JSON, not escaped strings --columns Include list of columns for each table --schema Include schema for each table --load-extension TEXT Path to SQLite extension, with optional :entrypoint -h, --help Show this message and exit. .. _cli_ref_views: views ===== See :ref:`cli_views`. :: Usage: sqlite-utils views [OPTIONS] PATH List the views in the database Example: sqlite-utils views trees.db Options: --counts Include row counts per view --nl Output newline-delimited JSON --arrays Output rows as arrays instead of objects --csv Output CSV --tsv Output TSV --no-headers Omit CSV headers -t, --table Output as a formatted table --fmt TEXT Table format - one of asciidoc, double_grid, double_outline, fancy_grid, fancy_outline, github, grid, heavy_grid, heavy_outline, html, jira, latex, latex_booktabs, latex_longtable, latex_raw, mediawiki, mixed_grid, mixed_outline, moinmoin, orgtbl, outline, pipe, plain, presto, pretty, psql, rounded_grid, rounded_outline, rst, simple, simple_grid, simple_outline, textile, tsv, unsafehtml, youtrack --json-cols Detect JSON cols and output them as JSON, not escaped strings --columns Include list of columns for each view --schema Include schema for each view --load-extension TEXT Path to SQLite extension, with optional :entrypoint -h, --help Show this message and exit. .. _cli_ref_rows: rows ==== See :ref:`cli_rows`. :: Usage: sqlite-utils rows [OPTIONS] PATH DBTABLE Output all rows in the specified table Example: sqlite-utils rows trees.db Trees Options: -c, --column TEXT Columns to return --where TEXT Optional where clause -o, --order TEXT Order by ('column' or 'column desc') -p, --param ... Named :parameters for where clause --limit INTEGER Number of rows to return - defaults to everything --offset INTEGER SQL offset to use --nl Output newline-delimited JSON --arrays Output rows as arrays instead of objects --csv Output CSV --tsv Output TSV --no-headers Omit CSV headers -t, --table Output as a formatted table --fmt TEXT Table format - one of asciidoc, double_grid, double_outline, fancy_grid, fancy_outline, github, grid, heavy_grid, heavy_outline, html, jira, latex, latex_booktabs, latex_longtable, latex_raw, mediawiki, mixed_grid, mixed_outline, moinmoin, orgtbl, outline, pipe, plain, presto, pretty, psql, rounded_grid, rounded_outline, rst, simple, simple_grid, simple_outline, textile, tsv, unsafehtml, youtrack --json-cols Detect JSON cols and output them as JSON, not escaped strings --load-extension TEXT Path to SQLite extension, with optional :entrypoint -h, --help Show this message and exit. .. _cli_ref_triggers: triggers ======== See :ref:`cli_triggers`. 
:: Usage: sqlite-utils triggers [OPTIONS] PATH [TABLES]... Show triggers configured in this database Example: sqlite-utils triggers trees.db Options: --nl Output newline-delimited JSON --arrays Output rows as arrays instead of objects --csv Output CSV --tsv Output TSV --no-headers Omit CSV headers -t, --table Output as a formatted table --fmt TEXT Table format - one of asciidoc, double_grid, double_outline, fancy_grid, fancy_outline, github, grid, heavy_grid, heavy_outline, html, jira, latex, latex_booktabs, latex_longtable, latex_raw, mediawiki, mixed_grid, mixed_outline, moinmoin, orgtbl, outline, pipe, plain, presto, pretty, psql, rounded_grid, rounded_outline, rst, simple, simple_grid, simple_outline, textile, tsv, unsafehtml, youtrack --json-cols Detect JSON cols and output them as JSON, not escaped strings --load-extension TEXT Path to SQLite extension, with optional :entrypoint -h, --help Show this message and exit. .. _cli_ref_indexes: indexes ======= See :ref:`cli_indexes`. :: Usage: sqlite-utils indexes [OPTIONS] PATH [TABLES]... Show indexes for the whole database or specific tables Example: sqlite-utils indexes trees.db Trees Options: --aux Include auxiliary columns --nl Output newline-delimited JSON --arrays Output rows as arrays instead of objects --csv Output CSV --tsv Output TSV --no-headers Omit CSV headers -t, --table Output as a formatted table --fmt TEXT Table format - one of asciidoc, double_grid, double_outline, fancy_grid, fancy_outline, github, grid, heavy_grid, heavy_outline, html, jira, latex, latex_booktabs, latex_longtable, latex_raw, mediawiki, mixed_grid, mixed_outline, moinmoin, orgtbl, outline, pipe, plain, presto, pretty, psql, rounded_grid, rounded_outline, rst, simple, simple_grid, simple_outline, textile, tsv, unsafehtml, youtrack --json-cols Detect JSON cols and output them as JSON, not escaped strings --load-extension TEXT Path to SQLite extension, with optional :entrypoint -h, --help Show this message and exit. .. _cli_ref_create_database: create-database =============== See :ref:`cli_create_database`. :: Usage: sqlite-utils create-database [OPTIONS] PATH Create a new empty database file Example: sqlite-utils create-database trees.db Options: --enable-wal Enable WAL mode on the created database --init-spatialite Enable SpatiaLite on the created database --load-extension TEXT Path to SQLite extension, with optional :entrypoint -h, --help Show this message and exit. .. _cli_ref_create_table: create-table ============ See :ref:`cli_create_table`. :: Usage: sqlite-utils create-table [OPTIONS] PATH TABLE COLUMNS... Add a table with the specified columns. Columns should be specified using name, type pairs, for example: sqlite-utils create-table my.db people \ id integer \ name text \ height float \ photo blob --pk id Valid column types are text, integer, float and blob. Options: --pk TEXT Column to use as primary key --not-null TEXT Columns that should be created as NOT NULL --default ... Default value that should be set for a column --fk ... Column, other table, other column to set as a foreign key --ignore If table already exists, do nothing --replace If table already exists, replace it --transform If table already exists, try to transform the schema --load-extension TEXT Path to SQLite extension, with optional :entrypoint -h, --help Show this message and exit. .. _cli_ref_create_index: create-index ============ See :ref:`cli_create_index`. :: Usage: sqlite-utils create-index [OPTIONS] PATH TABLE COLUMN... 
Add an index to the specified table for the specified columns Example: sqlite-utils create-index chickens.db chickens name To create an index in descending order: sqlite-utils create-index chickens.db chickens -- -name Options: --name TEXT Explicit name for the new index --unique Make this a unique index --if-not-exists, --ignore Ignore if index already exists --analyze Run ANALYZE after creating the index --load-extension TEXT Path to SQLite extension, with optional :entrypoint -h, --help Show this message and exit. .. _cli_ref_enable_fts: enable-fts ========== See :ref:`cli_fts`. :: Usage: sqlite-utils enable-fts [OPTIONS] PATH TABLE COLUMN... Enable full-text search for specific table and columns" Example: sqlite-utils enable-fts chickens.db chickens name Options: --fts4 Use FTS4 --fts5 Use FTS5 --tokenize TEXT Tokenizer to use, e.g. porter --create-triggers Create triggers to update the FTS tables when the parent table changes. --replace Replace existing FTS configuration if it exists --load-extension TEXT Path to SQLite extension, with optional :entrypoint -h, --help Show this message and exit. .. _cli_ref_populate_fts: populate-fts ============ :: Usage: sqlite-utils populate-fts [OPTIONS] PATH TABLE COLUMN... Re-populate full-text search for specific table and columns Example: sqlite-utils populate-fts chickens.db chickens name Options: --load-extension TEXT Path to SQLite extension, with optional :entrypoint -h, --help Show this message and exit. .. _cli_ref_rebuild_fts: rebuild-fts =========== :: Usage: sqlite-utils rebuild-fts [OPTIONS] PATH [TABLES]... Rebuild all or specific full-text search tables Example: sqlite-utils rebuild-fts chickens.db chickens Options: --load-extension TEXT Path to SQLite extension, with optional :entrypoint -h, --help Show this message and exit. .. _cli_ref_disable_fts: disable-fts =========== :: Usage: sqlite-utils disable-fts [OPTIONS] PATH TABLE Disable full-text search for specific table Example: sqlite-utils disable-fts chickens.db chickens Options: --load-extension TEXT Path to SQLite extension, with optional :entrypoint -h, --help Show this message and exit. .. _cli_ref_tui: tui === See :ref:`cli_tui`. :: Usage: sqlite-utils tui [OPTIONS] Open Textual TUI. Options: -h, --help Show this message and exit. .. _cli_ref_optimize: optimize ======== See :ref:`cli_optimize`. :: Usage: sqlite-utils optimize [OPTIONS] PATH [TABLES]... Optimize all full-text search tables and then run VACUUM - should shrink the database file Example: sqlite-utils optimize chickens.db Options: --no-vacuum Don't run VACUUM --load-extension TEXT Path to SQLite extension, with optional :entrypoint -h, --help Show this message and exit. .. _cli_ref_analyze: analyze ======= See :ref:`cli_analyze`. :: Usage: sqlite-utils analyze [OPTIONS] PATH [NAMES]... Run ANALYZE against the whole database, or against specific named indexes and tables Example: sqlite-utils analyze chickens.db Options: -h, --help Show this message and exit. .. _cli_ref_vacuum: vacuum ====== See :ref:`cli_vacuum`. :: Usage: sqlite-utils vacuum [OPTIONS] PATH Run VACUUM against the database Example: sqlite-utils vacuum chickens.db Options: -h, --help Show this message and exit. .. _cli_ref_dump: dump ==== See :ref:`cli_dump`. :: Usage: sqlite-utils dump [OPTIONS] PATH Output a SQL dump of the schema and full contents of the database Example: sqlite-utils dump chickens.db Options: --load-extension TEXT Path to SQLite extension, with optional :entrypoint -h, --help Show this message and exit. .. 
_cli_ref_add_column: add-column ========== See :ref:`cli_add_column`. :: Usage: sqlite-utils add-column [OPTIONS] PATH TABLE COL_NAME [[integer|float|blob|text|INTEGER|FLOAT|BLOB|TEXT]] Add a column to the specified table Example: sqlite-utils add-column chickens.db chickens weight float Options: --fk TEXT Table to reference as a foreign key --fk-col TEXT Referenced column on that foreign key table - if omitted will automatically use the primary key --not-null-default TEXT Add NOT NULL DEFAULT 'TEXT' constraint --ignore If column already exists, do nothing --load-extension TEXT Path to SQLite extension, with optional :entrypoint -h, --help Show this message and exit. .. _cli_ref_add_foreign_key: add-foreign-key =============== See :ref:`cli_add_foreign_key`. :: Usage: sqlite-utils add-foreign-key [OPTIONS] PATH TABLE COLUMN [OTHER_TABLE] [OTHER_COLUMN] Add a new foreign key constraint to an existing table Example: sqlite-utils add-foreign-key my.db books author_id authors id Options: --ignore If foreign key already exists, do nothing --load-extension TEXT Path to SQLite extension, with optional :entrypoint -h, --help Show this message and exit. .. _cli_ref_add_foreign_keys: add-foreign-keys ================ See :ref:`cli_add_foreign_keys`. :: Usage: sqlite-utils add-foreign-keys [OPTIONS] PATH [FOREIGN_KEY]... Add multiple new foreign key constraints to a database Example: sqlite-utils add-foreign-keys my.db \ books author_id authors id \ authors country_id countries id Options: --load-extension TEXT Path to SQLite extension, with optional :entrypoint -h, --help Show this message and exit. .. _cli_ref_index_foreign_keys: index-foreign-keys ================== See :ref:`cli_index_foreign_keys`. :: Usage: sqlite-utils index-foreign-keys [OPTIONS] PATH Ensure every foreign key column has an index on it Example: sqlite-utils index-foreign-keys chickens.db Options: --load-extension TEXT Path to SQLite extension, with optional :entrypoint -h, --help Show this message and exit. .. _cli_ref_enable_wal: enable-wal ========== See :ref:`cli_wal`. :: Usage: sqlite-utils enable-wal [OPTIONS] PATH... Enable WAL for database files Example: sqlite-utils enable-wal chickens.db Options: --load-extension TEXT Path to SQLite extension, with optional :entrypoint -h, --help Show this message and exit. .. _cli_ref_disable_wal: disable-wal =========== :: Usage: sqlite-utils disable-wal [OPTIONS] PATH... Disable WAL for database files Example: sqlite-utils disable-wal chickens.db Options: --load-extension TEXT Path to SQLite extension, with optional :entrypoint -h, --help Show this message and exit. .. _cli_ref_enable_counts: enable-counts ============= See :ref:`cli_enable_counts`. :: Usage: sqlite-utils enable-counts [OPTIONS] PATH [TABLES]... Configure triggers to update a _counts table with row counts Example: sqlite-utils enable-counts chickens.db Options: --load-extension TEXT Path to SQLite extension, with optional :entrypoint -h, --help Show this message and exit. .. _cli_ref_reset_counts: reset-counts ============ :: Usage: sqlite-utils reset-counts [OPTIONS] PATH Reset calculated counts in the _counts table Example: sqlite-utils reset-counts chickens.db Options: --load-extension TEXT Path to SQLite extension, with optional :entrypoint -h, --help Show this message and exit. .. _cli_ref_duplicate: duplicate ========= See :ref:`cli_duplicate_table`. :: Usage: sqlite-utils duplicate [OPTIONS] PATH TABLE NEW_TABLE Create a duplicate of this table, copying across the schema and all row data. 
Options: --ignore If table does not exist, do nothing --load-extension TEXT Path to SQLite extension, with optional :entrypoint -h, --help Show this message and exit. .. _cli_ref_rename_table: rename-table ============ See :ref:`cli_renaming_tables`. :: Usage: sqlite-utils rename-table [OPTIONS] PATH TABLE NEW_NAME Rename this table. Options: --ignore If table does not exist, do nothing --load-extension TEXT Path to SQLite extension, with optional :entrypoint -h, --help Show this message and exit. .. _cli_ref_drop_table: drop-table ========== See :ref:`cli_drop_table`. :: Usage: sqlite-utils drop-table [OPTIONS] PATH TABLE Drop the specified table Example: sqlite-utils drop-table chickens.db chickens Options: --ignore If table does not exist, do nothing --load-extension TEXT Path to SQLite extension, with optional :entrypoint -h, --help Show this message and exit. .. _cli_ref_create_view: create-view =========== See :ref:`cli_create_view`. :: Usage: sqlite-utils create-view [OPTIONS] PATH VIEW SELECT Create a view for the provided SELECT query Example: sqlite-utils create-view chickens.db heavy_chickens \ 'select * from chickens where weight > 3' Options: --ignore If view already exists, do nothing --replace If view already exists, replace it --load-extension TEXT Path to SQLite extension, with optional :entrypoint -h, --help Show this message and exit. .. _cli_ref_drop_view: drop-view ========= See :ref:`cli_drop_view`. :: Usage: sqlite-utils drop-view [OPTIONS] PATH VIEW Drop the specified view Example: sqlite-utils drop-view chickens.db heavy_chickens Options: --ignore If view does not exist, do nothing --load-extension TEXT Path to SQLite extension, with optional :entrypoint -h, --help Show this message and exit. .. _cli_ref_install: install ======= See :ref:`cli_install`. :: Usage: sqlite-utils install [OPTIONS] [PACKAGES]... Install packages from PyPI into the same environment as sqlite-utils Options: -U, --upgrade Upgrade packages to latest version -e, --editable TEXT Install a project in editable mode from this path -h, --help Show this message and exit. .. _cli_ref_uninstall: uninstall ========= See :ref:`cli_uninstall`. :: Usage: sqlite-utils uninstall [OPTIONS] PACKAGES... Uninstall Python packages from the sqlite-utils environment Options: -y, --yes Don't ask for confirmation -h, --help Show this message and exit. .. _cli_ref_add_geometry_column: add-geometry-column =================== See :ref:`cli_spatialite`. :: Usage: sqlite-utils add-geometry-column [OPTIONS] DB_PATH TABLE COLUMN_NAME Add a SpatiaLite geometry column to an existing table. Requires SpatiaLite extension. By default, this command will try to load the SpatiaLite extension from usual paths. To load it from a specific path, use --load-extension. Options: -t, --type [POINT|LINESTRING|POLYGON|MULTIPOINT|MULTILINESTRING|MULTIPOLYGON|GEOMETRYCOLLECTION|GEOMETRY] Specify a geometry type for this column. [default: GEOMETRY] --srid INTEGER Spatial Reference ID. See https://spatialreference.org for details on specific projections. [default: 4326] --dimensions TEXT Coordinate dimensions. Use XYZ for three- dimensional geometries. --not-null Add a NOT NULL constraint. --load-extension TEXT Path to SQLite extension, with optional :entrypoint -h, --help Show this message and exit. .. _cli_ref_create_spatial_index: create-spatial-index ==================== See :ref:`cli_spatialite_indexes`. :: Usage: sqlite-utils create-spatial-index [OPTIONS] DB_PATH TABLE COLUMN_NAME Create a spatial index on a SpatiaLite geometry column. 
The table and geometry column must already exist before trying to add a spatial index. By default, this command will try to load the SpatiaLite extension from usual paths. To load it from a specific path, use --load-extension. Options: --load-extension TEXT Path to SQLite extension, with optional :entrypoint -h, --help Show this message and exit. .. _cli_ref_plugins: plugins ======= :: Usage: sqlite-utils plugins [OPTIONS] List installed plugins Options: -h, --help Show this message and exit. .. [[[end]]] sqlite-utils-3.35.2/docs/cli.rst000066400000000000000000002537251452131415600165140ustar00rootroot00000000000000.. _cli: ================================ sqlite-utils command-line tool ================================ The ``sqlite-utils`` command-line tool can be used to manipulate SQLite databases in a number of different ways. Once :ref:`installed ` the tool should be available as ``sqlite-utils``. It can also be run using ``python -m sqlite_utils``. .. contents:: :local: :class: this-will-duplicate-information-and-it-is-still-useful-here .. _cli_query: Running SQL queries =================== The ``sqlite-utils query`` command lets you run queries directly against a SQLite database file. This is the default subcommand, so the following two examples work the same way: .. code-block:: bash sqlite-utils query dogs.db "select * from dogs" .. code-block:: bash sqlite-utils dogs.db "select * from dogs" .. note:: In Python: :ref:`db.query() ` CLI reference: :ref:`sqlite-utils query ` .. _cli_query_json: Returning JSON -------------- The default format returned for queries is JSON: .. code-block:: bash sqlite-utils dogs.db "select * from dogs" .. code-block:: output [{"id": 1, "age": 4, "name": "Cleo"}, {"id": 2, "age": 2, "name": "Pancakes"}] .. _cli_query_nl: Newline-delimited JSON ~~~~~~~~~~~~~~~~~~~~~~ Use ``--nl`` to get back newline-delimited JSON objects: .. code-block:: bash sqlite-utils dogs.db "select * from dogs" --nl .. code-block:: output {"id": 1, "age": 4, "name": "Cleo"} {"id": 2, "age": 2, "name": "Pancakes"} .. _cli_query_arrays: JSON arrays ~~~~~~~~~~~ You can use ``--arrays`` to request arrays instead of objects: .. code-block:: bash sqlite-utils dogs.db "select * from dogs" --arrays .. code-block:: output [[1, 4, "Cleo"], [2, 2, "Pancakes"]] You can also combine ``--arrays`` and ``--nl``: .. code-block:: bash sqlite-utils dogs.db "select * from dogs" --arrays --nl .. code-block:: output [1, 4, "Cleo"] [2, 2, "Pancakes"] If you want to pretty-print the output further, you can pipe it through ``python -mjson.tool``: .. code-block:: bash sqlite-utils dogs.db "select * from dogs" | python -mjson.tool .. code-block:: output [ { "id": 1, "age": 4, "name": "Cleo" }, { "id": 2, "age": 2, "name": "Pancakes" } ] .. _cli_query_binary_json: Binary data in JSON ~~~~~~~~~~~~~~~~~~~ Binary strings are not valid JSON, so BLOB columns containing binary data will be returned as a JSON object containing base64 encoded data, that looks like this: .. code-block:: bash sqlite-utils dogs.db "select name, content from images" | python -mjson.tool .. code-block:: output [ { "name": "transparent.gif", "content": { "$base64": true, "encoded": "R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7" } } ] .. _cli_json_values: Nested JSON values ~~~~~~~~~~~~~~~~~~ If one of your columns contains JSON, by default it will be returned as an escaped string: .. code-block:: bash sqlite-utils dogs.db "select * from dogs" | python -mjson.tool .. 
code-block:: output [ { "id": 1, "name": "Cleo", "friends": "[{\"name\": \"Pancakes\"}, {\"name\": \"Bailey\"}]" } ] You can use the ``--json-cols`` option to automatically detect these JSON columns and output them as nested JSON data: .. code-block:: bash sqlite-utils dogs.db "select * from dogs" --json-cols | python -mjson.tool .. code-block:: output [ { "id": 1, "name": "Cleo", "friends": [ { "name": "Pancakes" }, { "name": "Bailey" } ] } ] .. _cli_query_csv: Returning CSV or TSV -------------------- You can use the ``--csv`` option to return results as CSV: .. code-block:: bash sqlite-utils dogs.db "select * from dogs" --csv .. code-block:: output id,age,name 1,4,Cleo 2,2,Pancakes This will default to including the column names as a header row. To exclude the headers, use ``--no-headers``: .. code-block:: bash sqlite-utils dogs.db "select * from dogs" --csv --no-headers .. code-block:: output 1,4,Cleo 2,2,Pancakes Use ``--tsv`` instead of ``--csv`` to get back tab-separated values: .. code-block:: bash sqlite-utils dogs.db "select * from dogs" --tsv .. code-block:: output id age name 1 4 Cleo 2 2 Pancakes .. _cli_query_table: Table-formatted output ---------------------- You can use the ``--table`` option (or ``-t`` shortcut) to output query results as a table: .. code-block:: bash sqlite-utils dogs.db "select * from dogs" --table .. code-block:: output id age name ---- ----- -------- 1 4 Cleo 2 2 Pancakes You can use the ``--fmt`` option to specify different table formats, for example ``rst`` for reStructuredText: .. code-block:: bash sqlite-utils dogs.db "select * from dogs" --fmt rst .. code-block:: output ==== ===== ======== id age name ==== ===== ======== 1 4 Cleo 2 2 Pancakes ==== ===== ======== Available ``--fmt`` options are: .. [[[cog import tabulate cog.out("\n" + "\n".join('- ``{}``'.format(t) for t in tabulate.tabulate_formats) + "\n\n") .. ]]] - ``asciidoc`` - ``double_grid`` - ``double_outline`` - ``fancy_grid`` - ``fancy_outline`` - ``github`` - ``grid`` - ``heavy_grid`` - ``heavy_outline`` - ``html`` - ``jira`` - ``latex`` - ``latex_booktabs`` - ``latex_longtable`` - ``latex_raw`` - ``mediawiki`` - ``mixed_grid`` - ``mixed_outline`` - ``moinmoin`` - ``orgtbl`` - ``outline`` - ``pipe`` - ``plain`` - ``presto`` - ``pretty`` - ``psql`` - ``rounded_grid`` - ``rounded_outline`` - ``rst`` - ``simple`` - ``simple_grid`` - ``simple_outline`` - ``textile`` - ``tsv`` - ``unsafehtml`` - ``youtrack`` .. [[[end]]] This list can also be found by running ``sqlite-utils query --help``. .. _cli_query_raw: Returning raw data, such as binary content ------------------------------------------ If your table contains binary data in a ``BLOB`` you can use the ``--raw`` option to output specific columns directly to standard out. For example, to retrieve a binary image from a ``BLOB`` column and store it in a file you can use the following: .. code-block:: bash sqlite-utils photos.db "select contents from photos where id=1" --raw > myphoto.jpg To return the first column of each result as raw data, separated by newlines, use ``--raw-lines``: .. code-block:: bash sqlite-utils photos.db "select caption from photos" --raw-lines > captions.txt .. _cli_query_parameters: Using named parameters ---------------------- You can pass named parameters to the query using ``-p``: .. code-block:: bash sqlite-utils query dogs.db "select :num * :num2" -p num 5 -p num2 6 .. 
code-block:: output [{":num * :num2": 30}] These will be correctly quoted and escaped in the SQL query, providing a safe way to combine other values with SQL. .. _cli_query_update_insert_delete: UPDATE, INSERT and DELETE ------------------------- If you execute an ``UPDATE``, ``INSERT`` or ``DELETE`` query the command will return the number of affected rows: .. code-block:: bash sqlite-utils dogs.db "update dogs set age = 5 where name = 'Cleo'" .. code-block:: output [{"rows_affected": 1}] .. _cli_query_functions: Defining custom SQL functions ----------------------------- You can use the ``--functions`` option to pass a block of Python code that defines additional functions which can then be called by your SQL query. This example defines a function which extracts the domain from a URL: .. code-block:: bash sqlite-utils query sites.db "select url, domain(url) from urls" --functions ' from urllib.parse import urlparse def domain(url): return urlparse(url).netloc ' Every callable object defined in the block will be registered as a SQL function with the same name, with the exception of functions with names that begin with an underscore. .. _cli_query_extensions: SQLite extensions ----------------- You can load SQLite extension modules using the ``--load-extension`` option, see :ref:`cli_load_extension`. .. code-block:: bash sqlite-utils dogs.db "select spatialite_version()" --load-extension=spatialite .. code-block:: output [{"spatialite_version()": "4.3.0a"}] .. _cli_query_attach: Attaching additional databases ------------------------------ SQLite supports cross-database SQL queries, which can join data from tables in more than one database file. You can attach one or more additional databases using the ``--attach`` option, providing an alias to use for that database and the path to the SQLite file on disk. This example attaches the ``books.db`` database under the alias ``books`` and then runs a query that combines data from that database with the default ``dogs.db`` database: .. code-block:: bash sqlite-utils dogs.db --attach books books.db \ 'select * from sqlite_master union all select * from books.sqlite_master' .. note:: In Python: :ref:`db.attach() ` .. _cli_memory: Querying data directly using an in-memory database ================================================== The ``sqlite-utils memory`` command works similar to ``sqlite-utils query``, but allows you to execute queries against an in-memory database. You can also pass this command CSV or JSON files which will be loaded into a temporary in-memory table, allowing you to execute SQL against that data without a separate step to first convert it to SQLite. Without any extra arguments, this command executes SQL against the in-memory database directly: .. code-block:: bash sqlite-utils memory 'select sqlite_version()' .. code-block:: output [{"sqlite_version()": "3.35.5"}] It takes all of the same output formatting options as :ref:`sqlite-utils query `: ``--csv`` and ``--csv`` and ``--table`` and ``--nl``: .. code-block:: bash sqlite-utils memory 'select sqlite_version()' --csv .. code-block:: output sqlite_version() 3.35.5 .. code-block:: bash sqlite-utils memory 'select sqlite_version()' --fmt grid .. code-block:: output +--------------------+ | sqlite_version() | +====================+ | 3.35.5 | +--------------------+ .. 
_cli_memory_csv_json: Running queries directly against CSV or JSON -------------------------------------------- If you have data in CSV or JSON format you can load it into an in-memory SQLite database and run queries against it directly in a single command using ``sqlite-utils memory`` like this: .. code-block:: bash sqlite-utils memory data.csv "select * from data" You can pass multiple files to the command if you want to run joins between data from different files: .. code-block:: bash sqlite-utils memory one.csv two.json \ "select * from one join two on one.id = two.other_id" If your data is JSON it should be the same format supported by the :ref:`sqlite-utils insert command ` - so either a single JSON object (treated as a single row) or a list of JSON objects. CSV data can be comma- or tab- delimited. The in-memory tables will be named after the files without their extensions. The tool also sets up aliases for those tables (using SQL views) as ``t1``, ``t2`` and so on, or you can use the alias ``t`` to refer to the first table: .. code-block:: bash sqlite-utils memory example.csv "select * from t" If two files have the same name they will be assigned a numeric suffix: .. code-block:: bash sqlite-utils memory foo/data.csv bar/data.csv "select * from data_2" To read from standard input, use either ``-`` or ``stdin`` as the filename - then use ``stdin`` or ``t`` or ``t1`` as the table name: .. code-block:: bash cat example.csv | sqlite-utils memory - "select * from stdin" Incoming CSV data will be assumed to use ``utf-8``. If your data uses a different character encoding you can specify that with ``--encoding``: .. code-block:: bash cat example.csv | sqlite-utils memory - "select * from stdin" --encoding=latin-1 If you are joining across multiple CSV files they must all use the same encoding. Column types will be automatically detected in CSV or TSV data, using the same mechanism as ``--detect-types`` described in :ref:`cli_insert_csv_tsv`. You can pass the ``--no-detect-types`` option to disable this automatic type detection and treat all CSV and TSV columns as ``TEXT``. .. _cli_memory_explicit: Explicitly specifying the format -------------------------------- By default, ``sqlite-utils memory`` will attempt to detect the incoming data format (JSON, TSV or CSV) automatically. You can instead specify an explicit format by adding a ``:csv``, ``:tsv``, ``:json`` or ``:nl`` (for newline-delimited JSON) suffix to the filename. For example: .. code-block:: bash sqlite-utils memory one.dat:csv two.dat:nl \ "select * from one union select * from two" Here the contents of ``one.dat`` will be treated as CSV and the contents of ``two.dat`` will be treated as newline-delimited JSON. To explicitly specify the format for data piped into the tool on standard input, use ``stdin:format`` - for example: .. code-block:: bash cat one.dat | sqlite-utils memory stdin:csv "select * from stdin" .. _cli_memory_attach: Joining in-memory data against existing databases using \-\-attach ------------------------------------------------------------------ The :ref:`attach option ` can be used to attach database files to the in-memory connection, enabling joins between in-memory data loaded from a file and tables in existing SQLite database files. An example: .. 
code-block:: bash echo "id\n1\n3\n5" | sqlite-utils memory - --attach trees trees.db \ "select * from trees.trees where rowid in (select id from stdin)" Here the ``--attach trees trees.db`` option makes the ``trees.db`` database available with an alias of ``trees``. ``select * from trees.trees where ...`` can then query the ``trees`` table in that database. The CSV data that was piped into the script is available in the ``stdin`` table, so ``... where rowid in (select id from stdin)`` can be used to return rows from the ``trees`` table that match IDs that were piped in as CSV content. .. _cli_memory_schema_dump_save: \-\-schema, \-\-analyze, \-\-dump and \-\-save ---------------------------------------------- To see the in-memory database schema that would be used for a file or for multiple files, use ``--schema``: .. code-block:: bash sqlite-utils memory dogs.csv --schema .. code-block:: output CREATE TABLE [dogs] ( [id] INTEGER, [age] INTEGER, [name] TEXT ); CREATE VIEW t1 AS select * from [dogs]; CREATE VIEW t AS select * from [dogs]; You can run the equivalent of the :ref:`analyze-tables ` command using ``--analyze``: .. code-block:: bash sqlite-utils memory dogs.csv --analyze .. code-block:: output dogs.id: (1/3) Total rows: 2 Null rows: 0 Blank rows: 0 Distinct values: 2 dogs.name: (2/3) Total rows: 2 Null rows: 0 Blank rows: 0 Distinct values: 2 dogs.age: (3/3) Total rows: 2 Null rows: 0 Blank rows: 0 Distinct values: 2 You can output SQL that will both create the tables and insert the full data used to populate the in-memory database using ``--dump``: .. code-block:: bash sqlite-utils memory dogs.csv --dump .. code-block:: output BEGIN TRANSACTION; CREATE TABLE [dogs] ( [id] INTEGER, [age] INTEGER, [name] TEXT ); INSERT INTO "dogs" VALUES('1','4','Cleo'); INSERT INTO "dogs" VALUES('2','2','Pancakes'); CREATE VIEW t1 AS select * from [dogs]; CREATE VIEW t AS select * from [dogs]; COMMIT; Passing ``--save other.db`` will instead use that SQL to populate a new database file: .. code-block:: bash sqlite-utils memory dogs.csv --save dogs.db These features are mainly intended as debugging tools - for much more finely grained control over how data is inserted into a SQLite database file see :ref:`cli_inserting_data` and :ref:`cli_insert_csv_tsv`. .. _cli_rows: Returning all rows in a table ============================= You can return every row in a specified table using the ``rows`` command: .. code-block:: bash sqlite-utils rows dogs.db dogs .. code-block:: output [{"id": 1, "age": 4, "name": "Cleo"}, {"id": 2, "age": 2, "name": "Pancakes"}] This command accepts the same output options as ``query`` - so you can pass ``--nl``, ``--csv``, ``--tsv``, ``--no-headers``, ``--table`` and ``--fmt``. You can use the ``-c`` option to specify a subset of columns to return: .. code-block:: bash sqlite-utils rows dogs.db dogs -c age -c name .. code-block:: output [{"age": 4, "name": "Cleo"}, {"age": 2, "name": "Pancakes"}] You can filter rows using a where clause with the ``--where`` option: .. code-block:: bash sqlite-utils rows dogs.db dogs -c name --where 'name = "Cleo"' .. code-block:: output [{"name": "Cleo"}] Or pass named parameters using ``--where`` in combination with ``-p``: .. code-block:: bash sqlite-utils rows dogs.db dogs -c name --where 'name = :name' -p name Cleo .. code-block:: output [{"name": "Cleo"}] You can define a sort order using ``--order column`` or ``--order 'column desc'``. Use ``--limit N`` to only return the first ``N`` rows. 
Use ``--offset N`` to return rows starting from the specified offset. .. note:: In Python: :ref:`table.rows ` CLI reference: :ref:`sqlite-utils rows ` .. _cli_tables: Listing tables ============== You can list the names of tables in a database using the ``tables`` command: .. code-block:: bash sqlite-utils tables mydb.db .. code-block:: output [{"table": "dogs"}, {"table": "cats"}, {"table": "chickens"}] You can output this list in CSV using the ``--csv`` or ``--tsv`` options: .. code-block:: bash sqlite-utils tables mydb.db --csv --no-headers .. code-block:: output dogs cats chickens If you just want to see the FTS4 tables, you can use ``--fts4`` (or ``--fts5`` for FTS5 tables): .. code-block:: bash sqlite-utils tables docs.db --fts4 .. code-block:: output [{"table": "docs_fts"}] Use ``--counts`` to include a count of the number of rows in each table: .. code-block:: bash sqlite-utils tables mydb.db --counts .. code-block:: output [{"table": "dogs", "count": 12}, {"table": "cats", "count": 332}, {"table": "chickens", "count": 9}] Use ``--columns`` to include a list of columns in each table: .. code-block:: bash sqlite-utils tables dogs.db --counts --columns .. code-block:: output [{"table": "Gosh", "count": 0, "columns": ["c1", "c2", "c3"]}, {"table": "Gosh2", "count": 0, "columns": ["c1", "c2", "c3"]}, {"table": "dogs", "count": 2, "columns": ["id", "age", "name"]}] Use ``--schema`` to include the schema of each table: .. code-block:: bash sqlite-utils tables dogs.db --schema --table .. code-block:: output table schema ------- ----------------------------------------------- Gosh CREATE TABLE Gosh (c1 text, c2 text, c3 text) Gosh2 CREATE TABLE Gosh2 (c1 text, c2 text, c3 text) dogs CREATE TABLE [dogs] ( [id] INTEGER, [age] INTEGER, [name] TEXT) The ``--nl``, ``--csv``, ``--tsv``, ``--table`` and ``--fmt`` options are also available. .. note:: In Python: :ref:`db.tables or db.table_names() ` CLI reference: :ref:`sqlite-utils tables ` .. _cli_views: Listing views ============= The ``views`` command shows any views defined in the database: .. code-block:: bash sqlite-utils views sf-trees.db --table --counts --columns --schema .. code-block:: output view count columns schema --------- ------- -------------------- -------------------------------------------------------------- demo_view 189144 ['qSpecies'] CREATE VIEW demo_view AS select qSpecies from Street_Tree_List hello 1 ['sqlite_version()'] CREATE VIEW hello as select sqlite_version() It takes the same options as the ``tables`` command: * ``--columns`` * ``--schema`` * ``--counts`` * ``--nl`` * ``--csv`` * ``--tsv`` * ``--table`` .. note:: In Python: :ref:`db.views or db.view_names() ` CLI reference: :ref:`sqlite-utils views ` .. _cli_indexes: Listing indexes =============== The ``indexes`` command lists any indexes configured for the database: .. code-block:: bash sqlite-utils indexes covid.db --table .. 
code-block:: output table index_name seqno cid name desc coll key -------------------------------- ------------------------------------------------------ ------- ----- ----------------- ------ ------ ----- johns_hopkins_csse_daily_reports idx_johns_hopkins_csse_daily_reports_combined_key 0 12 combined_key 0 BINARY 1 johns_hopkins_csse_daily_reports idx_johns_hopkins_csse_daily_reports_country_or_region 0 1 country_or_region 0 BINARY 1 johns_hopkins_csse_daily_reports idx_johns_hopkins_csse_daily_reports_province_or_state 0 2 province_or_state 0 BINARY 1 johns_hopkins_csse_daily_reports idx_johns_hopkins_csse_daily_reports_day 0 0 day 0 BINARY 1 ny_times_us_counties idx_ny_times_us_counties_date 0 0 date 1 BINARY 1 ny_times_us_counties idx_ny_times_us_counties_fips 0 3 fips 0 BINARY 1 ny_times_us_counties idx_ny_times_us_counties_county 0 1 county 0 BINARY 1 ny_times_us_counties idx_ny_times_us_counties_state 0 2 state 0 BINARY 1 It shows indexes across all tables. To see indexes for specific tables, list those after the database: .. code-block:: bash sqlite-utils indexes covid.db johns_hopkins_csse_daily_reports --table The command defaults to only showing the columns that are explicitly part of the index. To also include auxiliary columns use the ``--aux`` option - these columns will be listed with a ``key`` of ``0``. The command takes the same format options as the ``tables`` and ``views`` commands. .. note:: In Python: :ref:`table.indexes ` CLI reference: :ref:`sqlite-utils indexes ` .. _cli_triggers: Listing triggers ================ The ``triggers`` command shows any triggers configured for the database: .. code-block:: bash sqlite-utils triggers global-power-plants.db --table .. code-block:: output name table sql --------------- --------- ----------------------------------------------------------------- plants_insert plants CREATE TRIGGER [plants_insert] AFTER INSERT ON [plants] BEGIN INSERT OR REPLACE INTO [_counts] VALUES ( 'plants', COALESCE( (SELECT count FROM [_counts] WHERE [table] = 'plants'), 0 ) + 1 ); END It defaults to showing triggers for all tables. To see triggers for one or more specific tables pass their names as arguments: .. code-block:: bash sqlite-utils triggers global-power-plants.db plants The command takes the same format options as the ``tables`` and ``views`` commands. .. note:: In Python: :ref:`table.triggers or db.triggers ` CLI reference: :ref:`sqlite-utils triggers ` .. _cli_schema: Showing the schema ================== The ``sqlite-utils schema`` command shows the full SQL schema for the database: .. code-block:: bash sqlite-utils schema dogs.db .. code-block:: output CREATE TABLE "dogs" ( [id] INTEGER PRIMARY KEY, [name] TEXT ); This will show the schema for every table and index in the database. To view the schema just for a specified subset of tables pass those as additional arguments: .. code-block:: bash sqlite-utils schema dogs.db dogs chickens .. note:: In Python: :ref:`table.schema ` or :ref:`db.schema ` CLI reference: :ref:`sqlite-utils schema ` .. _cli_analyze_tables: Analyzing tables ================ When working with a new database it can be useful to get an idea of the shape of the data. The ``sqlite-utils analyze-tables`` command inspects specified tables (or all tables) and calculates some useful details about each of the columns in those tables. To inspect the ``tags`` table in the ``github.db`` database, run the following: .. code-block:: bash sqlite-utils analyze-tables github.db tags .. 
code-block:: output tags.repo: (1/3) Total rows: 261 Null rows: 0 Blank rows: 0 Distinct values: 14 Most common: 88: 107914493 75: 140912432 27: 206156866 Least common: 1: 209590345 2: 206649770 2: 303218369 tags.name: (2/3) Total rows: 261 Null rows: 0 Blank rows: 0 Distinct values: 175 Most common: 10: 0.2 9: 0.1 7: 0.3 Least common: 1: 0.1.1 1: 0.11.1 1: 0.1a2 tags.sha: (3/3) Total rows: 261 Null rows: 0 Blank rows: 0 Distinct values: 261 For each column this tool displays the number of null rows, the number of blank rows (rows that contain an empty string), the number of distinct values and, for columns that are not entirely distinct, the most common and least common values. If you do not specify any tables every table in the database will be analyzed: .. code-block:: bash sqlite-utils analyze-tables github.db If you wish to analyze one or more specific columns, use the ``-c`` option: .. code-block:: bash sqlite-utils analyze-tables github.db tags -c sha To show more than 10 common values, use ``--common-limit 20``. To skip the most common or least common value analysis, use ``--no-most`` or ``--no-least``: .. code-block:: bash sqlite-utils analyze-tables github.db tags --common-limit 20 --no-least .. _cli_analyze_tables_save: Saving the analyzed table details --------------------------------- ``analyze-tables`` can take quite a while to run for large database files. You can save the results of the analysis to a database table called ``_analyze_tables_`` using the ``--save`` option: .. code-block:: bash sqlite-utils analyze-tables github.db --save The ``_analyze_tables_`` table has the following schema: .. code-block:: sql CREATE TABLE [_analyze_tables_] ( [table] TEXT, [column] TEXT, [total_rows] INTEGER, [num_null] INTEGER, [num_blank] INTEGER, [num_distinct] INTEGER, [most_common] TEXT, [least_common] TEXT, PRIMARY KEY ([table], [column]) ); The ``most_common`` and ``least_common`` columns will contain nested JSON arrays of the most common and least common values that look like this: .. code-block:: json [ ["Del Libertador, Av", 5068], ["Alberdi Juan Bautista Av.", 4612], ["Directorio Av.", 4552], ["Rivadavia, Av", 4532], ["Yerbal", 4512], ["Cosquín", 4472], ["Estado Plurinacional de Bolivia", 4440], ["Gordillo Timoteo", 4424], ["Montiel", 4360], ["Condarco", 4288] ] .. _cli_create_database: Creating an empty database ========================== You can create a new empty database file using the ``create-database`` command: .. code-block:: bash sqlite-utils create-database empty.db To enable :ref:`cli_wal` on the newly created database add the ``--enable-wal`` option: .. code-block:: bash sqlite-utils create-database empty.db --enable-wal To enable SpatiaLite metadata on a newly created database, add the ``--init-spatialite`` flag: .. code-block:: bash sqlite-utils create-database empty.db --init-spatialite That will look for SpatiaLite in a set of predictable locations. To load it from somewhere else, use the ``--load-extension`` option: .. code-block:: bash sqlite-utils create-database empty.db --init-spatialite --load-extension /path/to/spatialite.so .. _cli_inserting_data: Inserting JSON data =================== If you have data as JSON, you can use ``sqlite-utils insert tablename`` to insert it into a database. The table will be created with the correct (automatically detected) columns if it does not already exist. You can pass in a single JSON object or a list of JSON objects, either as a filename or piped directly to standard-in (by using ``-`` as the filename). 
Here's the simplest possible example: .. code-block:: bash echo '{"name": "Cleo", "age": 4}' | sqlite-utils insert dogs.db dogs - To specify a column as the primary key, use ``--pk=column_name``. To create a compound primary key across more than one column, use ``--pk`` multiple times. If you feed it a JSON list it will insert multiple records. For example, if ``dogs.json`` looks like this: .. code-block:: json [ { "id": 1, "name": "Cleo", "age": 4 }, { "id": 2, "name": "Pancakes", "age": 2 }, { "id": 3, "name": "Toby", "age": 6 } ] You can import all three records into an automatically created ``dogs`` table and set the ``id`` column as the primary key like so: .. code-block:: bash sqlite-utils insert dogs.db dogs dogs.json --pk=id You can skip inserting any records that have a primary key that already exists using ``--ignore``: .. code-block:: bash sqlite-utils insert dogs.db dogs dogs.json --ignore You can delete all the existing rows in the table before inserting the new records using ``--truncate``: .. code-block:: bash sqlite-utils insert dogs.db dogs dogs.json --truncate You can add the ``--analyze`` option to run ``ANALYZE`` against the table after the rows have been inserted. .. _cli_inserting_data_binary: Inserting binary data --------------------- You can insert binary data into a BLOB column by first encoding it using base64 and then structuring it like this: .. code-block:: json [ { "name": "transparent.gif", "content": { "$base64": true, "encoded": "R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7" } } ] .. _cli_inserting_data_nl_json: Inserting newline-delimited JSON -------------------------------- You can also import `newline-delimited JSON `__ using the ``--nl`` option: .. code-block:: bash echo '{"id": 1, "name": "Cleo"} {"id": 2, "name": "Suna"}' | sqlite-utils insert creatures.db creatures - --nl Newline-delimited JSON consists of full JSON objects separated by newlines. If you are processing data using ``jq`` you can use the ``jq -c`` option to output valid newline-delimited JSON. Since `Datasette `__ can export newline-delimited JSON, you can combine the Datasette and ``sqlite-utils`` like so: .. code-block:: bash curl -L "https://latest.datasette.io/fixtures/facetable.json?_shape=array&_nl=on" \ | sqlite-utils insert nl-demo.db facetable - --pk=id --nl You can also pipe ``sqlite-utils`` together to create a new SQLite database file containing the results of a SQL query against another database: .. code-block:: bash sqlite-utils sf-trees.db \ "select TreeID, qAddress, Latitude, Longitude from Street_Tree_List" --nl \ | sqlite-utils insert saved.db trees - --nl .. code-block:: bash sqlite-utils saved.db "select * from trees limit 5" --csv .. code-block:: output TreeID,qAddress,Latitude,Longitude 141565,501X Baker St,37.7759676911831,-122.441396661871 232565,940 Elizabeth St,37.7517102172731,-122.441498017841 119263,495X Lakeshore Dr,, 207368,920 Kirkham St,37.760210314285,-122.47073935813 188702,1501 Evans Ave,37.7422086702947,-122.387293152263 .. _cli_inserting_data_flatten: Flattening nested JSON objects ------------------------------ ``sqlite-utils insert`` and ``sqlite-utils memory`` both expect incoming JSON data to consist of an array of JSON objects, where the top-level keys of each object will become columns in the created database table. If your data is nested you can use the ``--flatten`` option to create columns that are derived from the nested data. Consider this example document, in a file called ``log.json``: .. 
code-block:: json { "httpRequest": { "latency": "0.112114537s", "requestMethod": "GET", "requestSize": "534", "status": 200 }, "insertId": "6111722f000b5b4c4d4071e2", "labels": { "service": "datasette-io" } } Inserting this into a table using ``sqlite-utils insert logs.db logs log.json`` will create a table with the following schema: .. code-block:: sql CREATE TABLE [logs] ( [httpRequest] TEXT, [insertId] TEXT, [labels] TEXT ); With the ``--flatten`` option columns will be created using ``topkey_nextkey`` column names - so running ``sqlite-utils insert logs.db logs log.json --flatten`` will create the following schema instead: .. code-block:: sql CREATE TABLE [logs] ( [httpRequest_latency] TEXT, [httpRequest_requestMethod] TEXT, [httpRequest_requestSize] TEXT, [httpRequest_status] INTEGER, [insertId] TEXT, [labels_service] TEXT ); .. _cli_insert_csv_tsv: Inserting CSV or TSV data ========================= If your data is in CSV format, you can insert it using the ``--csv`` option: .. code-block:: bash sqlite-utils insert dogs.db dogs dogs.csv --csv For tab-delimited data, use ``--tsv``: .. code-block:: bash sqlite-utils insert dogs.db dogs dogs.tsv --tsv Data is expected to be encoded as Unicode UTF-8. If your data is in another character encoding you can specify it using the ``--encoding`` option: .. code-block:: bash sqlite-utils insert dogs.db dogs dogs.tsv --tsv --encoding=latin-1 To stop inserting after a specified number of records - useful for getting a faster preview of a large file - use the ``--stop-after`` option: .. code-block:: bash sqlite-utils insert dogs.db dogs dogs.csv --csv --stop-after=10 A progress bar is displayed when inserting data from a file. You can hide the progress bar using the ``--silent`` option. By default every column inserted from a CSV or TSV file will be of type ``TEXT``. To automatically detect column types - resulting in a mix of ``TEXT``, ``INTEGER`` and ``FLOAT`` columns - use the ``--detect-types`` option (or its shortcut ``-d``). For example, given a ``creatures.csv`` file containing this: .. code-block:: name,age,weight Cleo,6,45.5 Dori,1,3.5 The following command: .. code-block:: bash sqlite-utils insert creatures.db creatures creatures.csv --csv --detect-types Will produce this schema: .. code-block:: bash sqlite-utils schema creatures.db .. code-block:: output CREATE TABLE "creatures" ( [name] TEXT, [age] INTEGER, [weight] FLOAT ); You can set the ``SQLITE_UTILS_DETECT_TYPES`` environment variable if you want ``--detect-types`` to be the default behavior: .. code-block:: bash export SQLITE_UTILS_DETECT_TYPES=1 If a CSV or TSV file includes empty cells, like this one: .. code-block:: csv name,age,weight Cleo,6, Dori,,3.5 They will be imported into SQLite as empty string values, ``""``. To import them as ``NULL`` values instead, use the ``--empty-null`` option: .. code-block:: bash sqlite-utils insert creatures.db creatures creatures.csv --csv --empty-null .. _cli_insert_csv_tsv_delimiter: Alternative delimiters and quote characters ------------------------------------------- If your file uses a delimiter other than ``,`` or a quote character other than ``"`` you can attempt to detect delimiters or you can specify them explicitly. The ``--sniff`` option can be used to attempt to detect the delimiters: .. code-block:: bash sqlite-utils insert dogs.db dogs dogs.csv --sniff Alternatively, you can specify them using the ``--delimiter`` and ``--quotechar`` options.
Here's a CSV file that uses ``;`` for delimiters and the ``|`` symbol for quote characters:: name;description Cleo;|Very fine; a friendly dog| Pancakes;A local corgi You can import that using: .. code-block:: bash sqlite-utils insert dogs.db dogs dogs.csv --delimiter=";" --quotechar="|" Passing ``--delimiter``, ``--quotechar`` or ``--sniff`` implies ``--csv``, so you can omit the ``--csv`` option. .. _cli_insert_csv_tsv_no_header: CSV files without a header row ------------------------------ The first row of any CSV or TSV file is expected to contain the names of the columns in that file. If your file does not include this row, you can use the ``--no-headers`` option to specify that the tool should not use that first row as headers. If you do this, the table will be created with column names called ``untitled_1`` and ``untitled_2`` and so on. You can then rename them using the ``sqlite-utils transform ... --rename`` command, see :ref:`cli_transform_table`. .. _cli_insert_unstructured: Inserting unstructured data with \-\-lines and \-\-text ======================================================= If you have an unstructured file you can insert its contents into a table with a single ``line`` column containing each line from the file using ``--lines``. This can be useful if you intend to further analyze those lines using SQL string functions or :ref:`sqlite-utils convert <cli_convert>`: .. code-block:: bash sqlite-utils insert logs.db loglines logfile.log --lines This will produce the following schema: .. code-block:: sql CREATE TABLE [loglines] ( [line] TEXT ); You can also insert the entire contents of the file into a single column called ``text`` using ``--text``: .. code-block:: bash sqlite-utils insert content.db content file.txt --text The schema here will be: .. code-block:: sql CREATE TABLE [content] ( [text] TEXT ); .. _cli_insert_convert: Applying conversions while inserting data ========================================= The ``--convert`` option can be used to apply a Python conversion function to imported data before it is inserted into the database. It works in a similar way to :ref:`sqlite-utils convert <cli_convert>`. Your Python function will be passed a dictionary called ``row`` for each item that is being imported. You can modify that dictionary and return it - or return a fresh dictionary - to change the data that will be inserted. Given a JSON file called ``dogs.json`` containing this: .. code-block:: json [ {"id": 1, "name": "Cleo"}, {"id": 2, "name": "Pancakes"} ] The following command will insert that data and add an ``is_good`` column set to ``1`` for each dog: .. code-block:: bash sqlite-utils insert dogs.db dogs dogs.json --convert 'row["is_good"] = 1' The ``--convert`` option also works with the ``--csv``, ``--tsv`` and ``--nl`` insert options. As with ``sqlite-utils convert`` you can use ``--import`` to import additional Python modules, see :ref:`cli_convert_import` for details. You can also pass code that runs some initialization steps and defines a ``convert(value)`` function, see :ref:`cli_convert_complex`. .. _cli_insert_convert_lines: \-\-convert with \-\-lines -------------------------- Things work slightly differently when combined with the ``--lines`` or ``--text`` options. With ``--lines``, instead of being passed a ``row`` dictionary your function will be passed a ``line`` string representing each line of the input.
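As a minimal sketch, the following hypothetical command stores each line alongside its character count - the ``length`` column name is purely illustrative, and it assumes a single expression is returned automatically in the same way as other ``--convert`` code:

.. code-block:: bash

    # Illustrative only: store each line plus its length in characters
    sqlite-utils insert logs.db loglines logfile.log --lines \
        --convert '{"line": line, "length": len(line)}'

A fuller example of parsing each line into multiple columns follows.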
Given a file called ``access.log`` containing the following:: INFO: 127.0.0.1:60581 - GET / HTTP/1.1 200 OK INFO: 127.0.0.1:60581 - GET /foo/-/static/app.css?cead5a HTTP/1.1 200 OK You could convert it into structured data like so: .. code-block:: bash sqlite-utils insert logs.db loglines access.log --convert ' type, source, _, verb, path, _, status, _ = line.split() return { "type": type, "source": source, "verb": verb, "path": path, "status": status, }' --lines The resulting table would look like this: ====== =============== ====== ============================ ======== type source verb path status ====== =============== ====== ============================ ======== INFO: 127.0.0.1:60581 GET / 200 INFO: 127.0.0.1:60581 GET /foo/-/static/app.css?cead5a 200 ====== =============== ====== ============================ ======== .. _cli_insert_convert_text: \-\-convert with \-\-text ------------------------- With ``--text`` the entire input to the command will be made available to the function as a variable called ``text``. The function can return a single dictionary which will be inserted as a single row, or it can return a list or iterator of dictionaries, each of which will be inserted. Here's how to use ``--convert`` and ``--text`` to insert one record per word in the input: .. code-block:: bash echo 'A bunch of words' | sqlite-utils insert words.db words - \ --text --convert '({"word": w} for w in text.split())' The result looks like this: .. code-block:: bash sqlite-utils dump words.db .. code-block:: output BEGIN TRANSACTION; CREATE TABLE [words] ( [word] TEXT ); INSERT INTO "words" VALUES('A'); INSERT INTO "words" VALUES('bunch'); INSERT INTO "words" VALUES('of'); INSERT INTO "words" VALUES('words'); COMMIT; .. _cli_insert_replace: Insert-replacing data ===================== The ``--replace`` option to ``insert`` causes any existing records with the same primary key to be replaced entirely by the new records. To replace a dog with an ID of 2 with a new record, run the following: .. code-block:: bash echo '{"id": 2, "name": "Pancakes", "age": 3}' | \ sqlite-utils insert dogs.db dogs - --pk=id --replace .. _cli_upsert: Upserting data ============== Upserting is update-or-insert. If a row exists with the specified primary key the provided columns will be updated. If no row exists that row will be created. Unlike ``insert --replace``, an upsert will ignore any column values that exist but are not present in the upsert document. For example: .. code-block:: bash echo '{"id": 2, "age": 4}' | \ sqlite-utils upsert dogs.db dogs - --pk=id This will update the dog with an ID of 2 to have an age of 4, creating a new record (with a null name) if one does not exist. If a row DOES exist the name will be left as-is. The command will fail if you reference columns that do not exist on the table. To automatically create missing columns, use the ``--alter`` option. .. note:: ``upsert`` in sqlite-utils 1.x worked like ``insert ... --replace`` does in 2.x. See `issue #66 `__ for details of this change. .. _cli_bulk: Executing SQL in bulk ===================== If you have a JSON, newline-delimited JSON, CSV or TSV file you can execute a bulk SQL query using each of the records in that file using the ``sqlite-utils bulk`` command. The command takes the database file, the SQL to be executed and the file containing records to be used when evaluating the SQL query. The SQL query should include ``:named`` parameters that match the keys in the records.
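Conceptually, ``bulk`` behaves like the named-parameter support in Python's ``sqlite3`` module, executing the query once per record inside a single transaction. Here is a rough sketch of that idea - the table and values are purely illustrative and this is not the actual implementation:

.. code-block:: python

    import sqlite3

    # Illustrative sketch: run one parameterized statement per record,
    # all inside a single transaction (assumes the table already exists).
    records = [{"id": 1, "name": "Blue"}, {"id": 2, "name": "Snowy"}]
    conn = sqlite3.connect("chickens.db")
    with conn:  # commits on success, rolls back on error
        conn.executemany(
            "insert into chickens (id, name) values (:id, :name)",
            records,
        )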
For example, given a ``chickens.csv`` CSV file containing the following: .. code-block:: id,name 1,Blue 2,Snowy 3,Azi 4,Lila 5,Suna 6,Cardi You could insert those rows into a pre-created ``chickens`` table like so: .. code-block:: bash sqlite-utils bulk chickens.db \ 'insert into chickens (id, name) values (:id, :name)' \ chickens.csv --csv This command takes the same options as the ``sqlite-utils insert`` command - so it defaults to expecting JSON but can accept other formats using ``--csv`` or ``--tsv`` or ``--nl`` or other options described above. By default all of the SQL queries will be executed in a single transaction. To commit every 20 records, use ``--batch-size 20``. .. _cli_insert_files: Inserting data from files ========================= The ``insert-files`` command can be used to insert the content of files, along with their metadata, into a SQLite table. Here's an example that inserts all of the GIF files in the current directory into a ``gifs.db`` database, placing the file contents in an ``images`` table: .. code-block:: bash sqlite-utils insert-files gifs.db images *.gif You can also pass one or more directories, in which case every file in those directories will be added recursively: .. code-block:: bash sqlite-utils insert-files gifs.db images path/to/my-gifs By default this command will create a table with the following schema: .. code-block:: sql CREATE TABLE [images] ( [path] TEXT PRIMARY KEY, [content] BLOB, [size] INTEGER ); Content will be treated as binary by default and stored in a ``BLOB`` column. You can use the ``--text`` option to store that content in a ``TEXT`` column instead. You can customize the schema using one or more ``-c`` options. For a table schema that includes just the path, MD5 hash and last modification time of the file, you would use this: .. code-block:: bash sqlite-utils insert-files gifs.db images *.gif -c path -c md5 -c mtime --pk=path This will result in the following schema: .. code-block:: sql CREATE TABLE [images] ( [path] TEXT PRIMARY KEY, [md5] TEXT, [mtime] FLOAT ); Note that there's no ``content`` column here at all - if you specify custom columns using ``-c`` you need to include ``-c content`` to create that column. You can change the name of one of these columns using a ``-c colname:coldef`` parameter. To rename the ``mtime`` column to ``last_modified`` you would use this: .. code-block:: bash sqlite-utils insert-files gifs.db images *.gif \ -c path -c md5 -c last_modified:mtime --pk=path You can pass ``--replace`` or ``--upsert`` to indicate what should happen if you try to insert a file with an existing primary key. Pass ``--alter`` to cause any missing columns to be added to the table. The full list of column definitions you can use is as follows: ``name`` The name of the file, e.g. ``cleo.jpg`` ``path`` The path to the file relative to the root folder, e.g. ``pictures/cleo.jpg`` ``fullpath`` The fully resolved path to the image, e.g. 
``/home/simonw/pictures/cleo.jpg`` ``sha256`` The SHA256 hash of the file contents ``md5`` The MD5 hash of the file contents ``mode`` The permission bits of the file, as an integer - you may want to convert this to octal ``content`` The binary file contents, which will be stored as a BLOB ``content_text`` The text file contents, which will be stored as TEXT ``mtime`` The modification time of the file, as floating point seconds since the Unix epoch ``ctime`` The creation time of the file, as floating point seconds since the Unix epoch ``mtime_int`` The modification time as an integer rather than a float ``ctime_int`` The creation time as an integer rather than a float ``mtime_iso`` The modification time as an ISO timestamp, e.g. ``2020-07-27T04:24:06.654246`` ``ctime_iso`` The creation time as an ISO timestamp ``size`` The integer size of the file in bytes ``stem`` The filename without the extension - for ``file.txt.gz`` this would be ``file.txt`` ``suffix`` The file extension - for ``file.txt.gz`` this would be ``.gz`` You can insert data piped from standard input like this: .. code-block:: bash cat dog.jpg | sqlite-utils insert-files dogs.db pics - --name=dog.jpg The ``-`` argument indicates data should be read from standard input. The string passed using the ``--name`` option will be used for the file name and path values. When inserting data from standard input only the following column definitions are supported: ``name``, ``path``, ``content``, ``content_text``, ``sha256``, ``md5`` and ``size``. .. _cli_convert: Converting data in columns ========================== The ``convert`` command can be used to transform the data in a specified column - for example to parse a date string into an ISO timestamp, or to split a string of tags into a JSON array. The command accepts a database, table, one or more columns and a string of Python code to be executed against the values from those columns. The following example would replace the values in the ``headline`` column in the ``articles`` table with an upper-case version: .. code-block:: bash sqlite-utils convert content.db articles headline 'value.upper()' The Python code is passed as a string. Within that Python code the ``value`` variable will be the value of the current column. The code you provide will be compiled into a function that takes ``value`` as a single argument. If you break your function body into multiple lines the last line should be a ``return`` statement: .. code-block:: bash sqlite-utils convert content.db articles headline ' value = str(value) return value.upper()' Your code will be automatically wrapped in a function, but you can also define a function called ``convert(value)`` which will be called, if available: .. code-block:: bash sqlite-utils convert content.db articles headline ' def convert(value): return value.upper()' Use a ``CODE`` value of ``-`` to read from standard input: .. code-block:: bash cat mycode.py | sqlite-utils convert content.db articles headline - Where ``mycode.py`` contains a fragment of Python code that looks like this: .. code-block:: python def convert(value): return value.upper() The conversion will be applied to every row in the specified table. You can limit that to just rows that match a ``WHERE`` clause using ``--where``: .. code-block:: bash sqlite-utils convert content.db articles headline 'value.upper()' \ --where "headline like '%cat%'" You can include named parameters in your where clause and populate them using one or more ``--param`` options: ..
code-block:: bash sqlite-utils convert content.db articles headline 'value.upper()' \ --where "headline like :query" \ --param query '%cat%' The ``--dry-run`` option will output a preview of the conversion against the first ten rows, without modifying the database. By default any rows with a falsey value for the column - such as ``0`` or ``null`` - will be skipped. Use the ``--no-skip-false`` option to disable this behaviour. .. _cli_convert_import: Importing additional modules ---------------------------- You can specify Python modules that should be imported and made available to your code using one or more ``--import`` options. This example uses the ``textwrap`` module to wrap the ``content`` column at 100 characters: .. code-block:: bash sqlite-utils convert content.db articles content \ '"\n".join(textwrap.wrap(value, 100))' \ --import=textwrap This supports nested imports as well, for example to use `ElementTree `__: .. code-block:: bash sqlite-utils convert content.db articles content \ 'xml.etree.ElementTree.fromstring(value).attrib["title"]' \ --import=xml.etree.ElementTree .. _cli_convert_debugger: Using the debugger ------------------ If an error occurs while running your conversion operation you may see a message like this:: user-defined function raised exception Add the ``--pdb`` option to catch the error and open the Python debugger at that point. The conversion operation will exit after you type ``q`` in the debugger. Here's an example debugging session. First, create an ``articles`` table with invalid XML in the ``content`` column: .. code-block:: bash echo '{"content": "This is not XML"}' | sqlite-utils insert content.db articles - Now run the conversion with the ``--pdb`` option: .. code-block:: bash sqlite-utils convert content.db articles content \ 'xml.etree.ElementTree.fromstring(value).attrib["title"]' \ --import=xml.etree.ElementTree \ --pdb When the error occurs the debugger will open:: Exception raised, dropping into pdb...: syntax error: line 1, column 0 > .../python3.11/xml/etree/ElementTree.py(1338)XML() -> parser.feed(text) (Pdb) args text = 'This is not XML' parser = (Pdb) q ``args`` here shows the arguments to the current function in the stack. The Python `pdb documentation `__ has full details on the other available commands. .. _cli_convert_complex: Defining a convert() function ----------------------------- Instead of providing a single line of code to be executed against each value, you can define a function called ``convert(value)``. This mechanism can be used to execute one-off initialization code that runs once at the start of the conversion run. The following example adds a new ``score`` column, then populates it with a random number - after first seeding the random number generator to ensure that multiple runs produce the same results: .. code-block:: bash sqlite-utils add-column content.db articles score float --not-null-default 1.0 sqlite-utils convert content.db articles score ' import random random.seed(10) def convert(value): return random.random() ' .. _cli_convert_recipes: sqlite-utils convert recipes ---------------------------- Various built-in recipe functions are available for common operations. These are: ``r.jsonsplit(value, delimiter=',', type=str)`` Convert a string like ``a,b,c`` into a JSON array ``["a", "b", "c"]`` The ``delimiter`` parameter can be used to specify a different delimiter.
The ``type`` parameter can be set to ``float`` or ``int`` to produce a JSON array of different types, for example if the column's string value was ``1.2,3,4.5`` the following:: r.jsonsplit(value, type=float) Would produce an array like this: ``[1.2, 3.0, 4.5]`` ``r.parsedate(value, dayfirst=False, yearfirst=False, errors=None)`` Parse a date and convert it to ISO date format: ``yyyy-mm-dd`` In the case of dates such as ``03/04/05`` U.S. ``MM/DD/YY`` format is assumed - you can use ``dayfirst=True`` or ``yearfirst=True`` to change how these ambiguous dates are interpreted. Use the ``errors=`` parameter to specify what should happen if a value cannot be parsed. By default, if any value cannot be parsed an error will occur and all values will be left as they were. Set ``errors=r.IGNORE`` to ignore any values that cannot be parsed, leaving them unchanged. Set ``errors=r.SET_NULL`` to set any values that cannot be parsed to ``null``. ``r.parsedatetime(value, dayfirst=False, yearfirst=False, errors=None)`` Parse a datetime and convert it to ISO datetime format: ``yyyy-mm-ddTHH:MM:SS`` These recipes can be used in the code passed to ``sqlite-utils convert`` like this: .. code-block:: bash sqlite-utils convert my.db mytable mycolumn \ 'r.jsonsplit(value)' To use any of the documented parameters, do this: .. code-block:: bash sqlite-utils convert my.db mytable mycolumn \ 'r.jsonsplit(value, delimiter=":")' .. _cli_convert_output: Saving the result to a different column --------------------------------------- The ``--output`` and ``--output-type`` options can be used to save the result of the conversion to a separate column, which will be created if that column does not already exist: .. code-block:: bash sqlite-utils convert content.db articles headline 'value.upper()' \ --output headline_upper The type of the created column defaults to ``text``, but a different column type can be specified using ``--output-type``. This example will create a new floating point column called ``id_as_a_float`` with a copy of each item's ID increased by 0.5: .. code-block:: bash sqlite-utils convert content.db articles id 'float(value) + 0.5' \ --output id_as_a_float \ --output-type float You can drop the original column at the end of the operation by adding ``--drop``. .. _cli_convert_multi: Converting a column into multiple columns ----------------------------------------- Sometimes you may wish to convert a single column into multiple derived columns. For example, you may have a ``location`` column containing ``latitude,longitude`` values which you wish to split out into separate ``latitude`` and ``longitude`` columns. You can achieve this using the ``--multi`` option to ``sqlite-utils convert``. This option expects your Python code to return a Python dictionary: new columns will be created and populated for each of the keys in that dictionary. For the ``latitude,longitude`` example you would use the following: .. code-block:: bash sqlite-utils convert demo.db places location \ 'bits = value.split(",") return { "latitude": float(bits[0]), "longitude": float(bits[1]), }' --multi The type of the returned values will be taken into account when creating the new columns. In this example, the resulting database schema will look like this: .. code-block:: sql CREATE TABLE [places] ( [location] TEXT, [latitude] FLOAT, [longitude] FLOAT ); The code function can also return ``None``, in which case its output will be ignored. You can drop the original column at the end of the operation by adding ``--drop``. ..
_cli_create_table: Creating tables =============== Most of the time creating tables by inserting example data is the quickest approach. If you need to create an empty table in advance of inserting data you can do so using the ``create-table`` command: .. code-block:: bash sqlite-utils create-table mydb.db mytable id integer name text --pk=id This will create a table called ``mytable`` with two columns - an integer ``id`` column and a text ``name`` column. It will set the ``id`` column to be the primary key. You can pass as many column-name column-type pairs as you like. Valid types are ``integer``, ``text``, ``float`` and ``blob``. You can specify columns that should be NOT NULL using ``--not-null colname``. You can specify default values for columns using ``--default colname defaultvalue``. .. code-block:: bash sqlite-utils create-table mydb.db mytable \ id integer \ name text \ age integer \ is_good integer \ --not-null name \ --not-null age \ --default is_good 1 \ --pk=id .. code-block:: bash sqlite-utils tables mydb.db --schema -t .. code-block:: output table schema ------- -------------------------------- mytable CREATE TABLE [mytable] ( [id] INTEGER PRIMARY KEY, [name] TEXT NOT NULL, [age] INTEGER NOT NULL, [is_good] INTEGER DEFAULT '1' ) You can specify foreign key relationships between the tables you are creating using ``--fk colname othertable othercolumn``: .. code-block:: bash sqlite-utils create-table books.db authors \ id integer \ name text \ --pk=id sqlite-utils create-table books.db books \ id integer \ title text \ author_id integer \ --pk=id \ --fk author_id authors id .. code-block:: bash sqlite-utils tables books.db --schema -t .. code-block:: output table schema ------- ------------------------------------------------- authors CREATE TABLE [authors] ( [id] INTEGER PRIMARY KEY, [name] TEXT ) books CREATE TABLE [books] ( [id] INTEGER PRIMARY KEY, [title] TEXT, [author_id] INTEGER REFERENCES [authors]([id]) ) If a table with the same name already exists, you will get an error. You can choose to silently ignore this error with ``--ignore``, or you can replace the existing table with a new, empty table using ``--replace``. You can also pass ``--transform`` to transform the existing table to match the new schema. See :ref:`python_api_explicit_create` in the Python library documentation for details of how this option works. .. _cli_renaming_tables: Renaming a table ================ You can rename a table using the ``rename-table`` command: .. code-block:: bash sqlite-utils rename-table mydb.db oldname newname Pass ``--ignore`` to ignore any errors caused by the table not existing, or the new name already being in use. .. _cli_duplicate_table: Duplicating tables ================== The ``duplicate`` command duplicates a table - creating a new table with the same schema and a copy of all of the rows: .. code-block:: bash sqlite-utils duplicate books.db authors authors_copy .. _cli_drop_table: Dropping tables =============== You can drop a table using the ``drop-table`` command: .. code-block:: bash sqlite-utils drop-table mydb.db mytable Use ``--ignore`` to ignore the error if the table does not exist. .. _cli_transform_table: Transforming tables =================== The ``transform`` command allows you to apply complex transformations to a table that cannot be implemented using a regular SQLite ``ALTER TABLE`` command. See :ref:`python_api_transform` for details of how this works. ..
code-block:: bash sqlite-utils transform mydb.db mytable \ --drop column1 \ --rename column2 column_renamed Every option for this command (with the exception of ``--pk-none``) can be specified multiple times. The options are as follows: ``--type column-name new-type`` Change the type of the specified column. Valid types are ``integer``, ``text``, ``float``, ``blob``. ``--drop column-name`` Drop the specified column. ``--rename column-name new-name`` Rename this column to a new name. ``--column-order column`` Use this multiple times to specify a new order for your columns. ``-o`` shortcut is also available. ``--not-null column-name`` Set this column as ``NOT NULL``. ``--not-null-false column-name`` For a column that is currently set as ``NOT NULL``, remove the ``NOT NULL``. ``--pk column-name`` Change the primary key column for this table. Pass ``--pk`` multiple times if you want to create a compound primary key. ``--pk-none`` Remove the primary key from this table, turning it into a ``rowid`` table. ``--default column-name value`` Set the default value of this column. ``--default-none column`` Remove the default value for this column. ``--drop-foreign-key column`` Drop the specified foreign key. ``--add-foreign-key column other_table other_column`` Add a foreign key constraint to ``column`` pointing to ``other_table.other_column``. If you want to see the SQL that will be executed to make the change without actually executing it, add the ``--sql`` flag. For example: .. code-block:: bash sqlite-utils transform fixtures.db roadside_attractions \ --rename pk id \ --default name Untitled \ --column-order id \ --column-order longitude \ --column-order latitude \ --drop address \ --sql .. code-block:: output CREATE TABLE [roadside_attractions_new_4033a60276b9] ( [id] INTEGER PRIMARY KEY, [longitude] FLOAT, [latitude] FLOAT, [name] TEXT DEFAULT 'Untitled' ); INSERT INTO [roadside_attractions_new_4033a60276b9] ([longitude], [latitude], [id], [name]) SELECT [longitude], [latitude], [pk], [name] FROM [roadside_attractions]; DROP TABLE [roadside_attractions]; ALTER TABLE [roadside_attractions_new_4033a60276b9] RENAME TO [roadside_attractions]; .. _cli_transform_table_add_primary_key_to_rowid: Adding a primary key to a rowid table ------------------------------------- SQLite tables that are created without an explicit primary key are created as `rowid tables `__. They still have a numeric primary key which is available in the ``rowid`` column, but that column is not included in the output of ``select *``. Here's an example: .. code-block:: bash echo '[{"name": "Azi"}, {"name": "Suna"}]' | \ sqlite-utils insert chickens.db chickens - sqlite-utils schema chickens.db .. code-block:: output CREATE TABLE [chickens] ( [name] TEXT ); .. code-block:: bash sqlite-utils chickens.db 'select * from chickens' .. code-block:: output [{"name": "Azi"}, {"name": "Suna"}] .. code-block:: bash sqlite-utils chickens.db 'select rowid, * from chickens' .. code-block:: output [{"rowid": 1, "name": "Azi"}, {"rowid": 2, "name": "Suna"}] You can use ``sqlite-utils transform ... --pk id`` to add a primary key column called ``id`` to the table. The primary key will be created as an ``INTEGER PRIMARY KEY`` and the existing ``rowid`` values will be copied across to it. It will automatically increment as new rows are added to the table: .. code-block:: bash sqlite-utils transform chickens.db chickens --pk id .. code-block:: bash sqlite-utils schema chickens.db ..
code-block:: output CREATE TABLE "chickens" ( [id] INTEGER PRIMARY KEY, [name] TEXT ); .. code-block:: bash sqlite-utils chickens.db 'select * from chickens' .. code-block:: output [{"id": 1, "name": "Azi"}, {"id": 2, "name": "Suna"}] .. code-block:: bash echo '{"name": "Cardi"}' | sqlite-utils insert chickens.db chickens - .. code-block:: bash sqlite-utils chickens.db 'select * from chickens' .. code-block:: output [{"id": 1, "name": "Azi"}, {"id": 2, "name": "Suna"}, {"id": 3, "name": "Cardi"}] .. _cli_extract: Extracting columns into a separate table ======================================== The ``sqlite-utils extract`` command can be used to extract specified columns into a separate table. Take a look at the Python API documentation for :ref:`python_api_extract` for a detailed description of how this works, including examples of table schemas before and after running an extraction operation. The command takes a database, table and one or more columns that should be extracted. To extract the ``species`` column from the ``trees`` table you would run: .. code-block:: bash sqlite-utils extract my.db trees species This would produce the following schema: .. code-block:: sql CREATE TABLE "trees" ( [id] INTEGER PRIMARY KEY, [TreeAddress] TEXT, [species_id] INTEGER, FOREIGN KEY(species_id) REFERENCES species(id) ); CREATE TABLE [species] ( [id] INTEGER PRIMARY KEY, [species] TEXT ); CREATE UNIQUE INDEX [idx_species_species] ON [species] ([species]); The command takes the following options: ``--table TEXT`` The name of the lookup to extract columns to. This defaults to using the name of the columns that are being extracted. ``--fk-column TEXT`` The name of the foreign key column to add to the table. Defaults to ``columnname_id``. ``--rename `` Use this option to rename the columns created in the new lookup table. ``--silent`` Don't display the progress bar. Here's a more complex example that makes use of these options. It converts `this CSV file `__ full of global power plants into SQLite, then extracts the ``country`` and ``country_long`` columns into a separate ``countries`` table: .. code-block:: bash wget 'https://github.com/wri/global-power-plant-database/blob/232a6666/output_database/global_power_plant_database.csv?raw=true' sqlite-utils insert global.db power_plants \ 'global_power_plant_database.csv?raw=true' --csv # Extract those columns: sqlite-utils extract global.db power_plants country country_long \ --table countries \ --fk-column country_id \ --rename country_long name After running the above, the command ``sqlite-utils schema global.db`` reveals the following schema: .. code-block:: sql CREATE TABLE [countries] ( [id] INTEGER PRIMARY KEY, [country] TEXT, [name] TEXT ); CREATE TABLE "power_plants" ( [country_id] INTEGER, [name] TEXT, [gppd_idnr] TEXT, [capacity_mw] TEXT, [latitude] TEXT, [longitude] TEXT, [primary_fuel] TEXT, [other_fuel1] TEXT, [other_fuel2] TEXT, [other_fuel3] TEXT, [commissioning_year] TEXT, [owner] TEXT, [source] TEXT, [url] TEXT, [geolocation_source] TEXT, [wepp_id] TEXT, [year_of_capacity_data] TEXT, [generation_gwh_2013] TEXT, [generation_gwh_2014] TEXT, [generation_gwh_2015] TEXT, [generation_gwh_2016] TEXT, [generation_gwh_2017] TEXT, [generation_data_source] TEXT, [estimated_generation_gwh] TEXT, FOREIGN KEY([country_id]) REFERENCES [countries]([id]) ); CREATE UNIQUE INDEX [idx_countries_country_name] ON [countries] ([country], [name]); .. _cli_create_view: Creating views ============== You can create a view using the ``create-view`` command: .. 
code-block:: bash sqlite-utils create-view mydb.db version "select sqlite_version()" .. code-block:: bash sqlite-utils mydb.db "select * from version" .. code-block:: output [{"sqlite_version()": "3.31.1"}] Use ``--replace`` to replace an existing view of the same name, and ``--ignore`` to do nothing if a view already exists. .. _cli_drop_view: Dropping views ============== You can drop a view using the ``drop-view`` command: .. code-block:: bash sqlite-utils drop-view mydb.db myview Use ``--ignore`` to ignore the error if the view does not exist. .. _cli_add_column: Adding columns ============== You can add a column using the ``add-column`` command: .. code-block:: bash sqlite-utils add-column mydb.db mytable nameofcolumn text The last argument here is the type of the column to be created. You can use one of ``text``, ``integer``, ``float`` or ``blob``. If you leave it off, ``text`` will be used. You can add a column that is a foreign key reference to another table using the ``--fk`` option: .. code-block:: bash sqlite-utils add-column mydb.db dogs species_id --fk species This will automatically detect the name of the primary key on the species table and use that (and its type) for the new column. You can explicitly specify the column you wish to reference using ``--fk-col``: .. code-block:: bash sqlite-utils add-column mydb.db dogs species_id --fk species --fk-col ref You can set a ``NOT NULL DEFAULT 'x'`` constraint on the new column using ``--not-null-default``: .. code-block:: bash sqlite-utils add-column mydb.db dogs friends_count integer --not-null-default 0 .. _cli_add_column_alter: Adding columns automatically on insert/update ============================================= You can use the ``--alter`` option to automatically add new columns if the data you are inserting or upserting is of a different shape: .. code-block:: bash sqlite-utils insert dogs.db dogs new-dogs.json --pk=id --alter .. _cli_add_foreign_key: Adding foreign key constraints ============================== The ``add-foreign-key`` command can be used to add new foreign key references to an existing table - something which SQLite's ``ALTER TABLE`` command does not support. To add a foreign key constraint pointing the ``books.author_id`` column to ``authors.id`` in another table, do this: .. code-block:: bash sqlite-utils add-foreign-key books.db books author_id authors id If you omit the other table and other column references ``sqlite-utils`` will attempt to guess them - so the above example could instead look like this: .. code-block:: bash sqlite-utils add-foreign-key books.db books author_id Add ``--ignore`` to ignore an existing foreign key (as opposed to returning an error): .. code-block:: bash sqlite-utils add-foreign-key books.db books author_id --ignore See :ref:`python_api_add_foreign_key` in the Python API documentation for further details, including how the automatic table guessing mechanism works. .. _cli_add_foreign_keys: Adding multiple foreign keys at once ------------------------------------ Adding a foreign key requires a ``VACUUM``. On large databases this can be an expensive operation, so if you are adding multiple foreign keys you can combine them into one operation (and hence one ``VACUUM``) using ``add-foreign-keys``: .. code-block:: bash sqlite-utils add-foreign-keys books.db \ books author_id authors id \ authors country_id countries id When you are using this command each foreign key needs to be defined in full, as four arguments - the table, column, other table and other column. ..
_cli_index_foreign_keys: Adding indexes for all foreign keys ----------------------------------- If you want to ensure that every foreign key column in your database has a corresponding index, you can do so like this: .. code-block:: bash sqlite-utils index-foreign-keys books.db .. _cli_defaults_not_null: Setting defaults and not null constraints ========================================= You can use the ``--not-null`` and ``--default`` options (to both ``insert`` and ``upsert``) to specify columns that should be ``NOT NULL`` or to set database defaults for one or more specific columns: .. code-block:: bash sqlite-utils insert dogs.db dogs_with_scores dogs-with-scores.json \ --not-null=age \ --not-null=name \ --default age 2 \ --default score 5 .. _cli_create_index: Creating indexes ================ You can add an index to an existing table using the ``create-index`` command: .. code-block:: bash sqlite-utils create-index mydb.db mytable col1 [col2...] This can be used to create indexes against a single column or multiple columns. The name of the index will be automatically derived from the table and columns. To specify a different name, use ``--name=name_of_index``. Use the ``--unique`` option to create a unique index. Use ``--if-not-exists`` to avoid attempting to create the index if one with that name already exists. To add an index on a column in descending order, prefix the column with a hyphen. Since this can be confused for a command-line option you need to construct that like this: .. code-block:: bash sqlite-utils create-index mydb.db mytable -- col1 -col2 col3 This will create an index on that table on ``(col1, col2 desc, col3)``. If your column names are already prefixed with a hyphen you'll need to manually execute a ``CREATE INDEX`` SQL statement to add indexes to them rather than using this tool. Add the ``--analyze`` option to run ``ANALYZE`` against the index after it has been created. .. _cli_fts: Configuring full-text search ============================ You can enable SQLite full-text search on a table and a set of columns like this: .. code-block:: bash sqlite-utils enable-fts mydb.db documents title summary This will use SQLite's FTS5 module by default. Use ``--fts4`` if you want to use FTS4: .. code-block:: bash sqlite-utils enable-fts mydb.db documents title summary --fts4 The ``enable-fts`` command will populate the new index with all existing documents. If you later add more documents you will need to use ``populate-fts`` to cause them to be indexed as well: .. code-block:: bash sqlite-utils populate-fts mydb.db documents title summary A better solution here is to use database triggers. You can set up database triggers to automatically update the full-text index using the ``--create-triggers`` option when you first run ``enable-fts``: .. code-block:: bash sqlite-utils enable-fts mydb.db documents title summary --create-triggers To set a custom FTS tokenizer, e.g. to enable Porter stemming, use ``--tokenize=``: .. code-block:: bash sqlite-utils enable-fts mydb.db documents title summary --tokenize=porter To remove the FTS tables and triggers you created, use ``disable-fts``: .. code-block:: bash sqlite-utils disable-fts mydb.db documents To rebuild one or more FTS tables (see :ref:`python_api_fts_rebuild`), use ``rebuild-fts``: .. code-block:: bash sqlite-utils rebuild-fts mydb.db documents You can rebuild every FTS table by running ``rebuild-fts`` without passing any table names: .. code-block:: bash sqlite-utils rebuild-fts mydb.db ..
_cli_search: Executing searches ================== Once you have configured full-text search for a table, you can search it using ``sqlite-utils search``: .. code-block:: bash sqlite-utils search mydb.db documents searchterm This command accepts the same output options as ``sqlite-utils query``: ``--table``, ``--csv``, ``--tsv``, ``--nl`` etc. By default it shows the most relevant matches first. You can specify a different sort order using the ``-o`` option, which can take a column or a column followed by ``desc``: .. code-block:: bash # Sort by rowid sqlite-utils search mydb.db documents searchterm -o rowid # Sort by created in descending order sqlite-utils search mydb.db documents searchterm -o 'created desc' SQLite `advanced search syntax `__ is enabled by default. To run a search with automatic quoting applied to the terms to avoid them being potentially interpreted as advanced search syntax use the ``--quote`` option. You can specify a subset of columns to be returned using the ``-c`` option one or more times: .. code-block:: bash sqlite-utils search mydb.db documents searchterm -c title -c created By default all search results will be returned. You can use ``--limit 20`` to return just the first 20 results. Use the ``--sql`` option to output the SQL that would be executed, rather than running the query: .. code-block:: bash sqlite-utils search mydb.db documents searchterm --sql .. code-block:: output with original as ( select rowid, * from [documents] ) select [original].* from [original] join [documents_fts] on [original].rowid = [documents_fts].rowid where [documents_fts] match :query order by [documents_fts].rank .. _cli_enable_counts: Enabling cached counts ====================== ``select count(*)`` queries can take a long time against large tables. ``sqlite-utils`` can speed these up by adding triggers to maintain a ``_counts`` table, see :ref:`python_api_cached_table_counts` for details. The ``sqlite-utils enable-counts`` command can be used to configure these triggers, either for every table in the database or for specific tables. .. code-block:: bash # Configure triggers for every table in the database sqlite-utils enable-counts mydb.db # Configure triggers just for specific tables sqlite-utils enable-counts mydb.db table1 table2 If the ``_counts`` table ever becomes out-of-sync with the actual table counts you can repair it using the ``reset-counts`` command: .. code-block:: bash sqlite-utils reset-counts mydb.db .. _cli_analyze: Optimizing index usage with ANALYZE =================================== The `SQLite ANALYZE command `__ builds a table of statistics which the query planner can use to make better decisions about which indexes to use for a given query. You should run ``ANALYZE`` if your database is large and you do not think your indexes are being efficiently used. To run ``ANALYZE`` against every index in a database, use this: .. code-block:: bash sqlite-utils analyze mydb.db You can run it against specific tables, or against specific named indexes, by passing them as optional arguments: .. code-block:: bash sqlite-utils analyze mydb.db mytable idx_mytable_name You can also run ``ANALYZE`` as part of another command using the ``--analyze`` option. This is supported by the ``create-index``, ``insert`` and ``upsert`` commands. .. _cli_vacuum: Vacuum ====== You can run VACUUM to optimize your database like so: .. code-block:: bash sqlite-utils vacuum mydb.db .. 
_cli_optimize: Optimize ======== The optimize command can dramatically reduce the size of your database if you are using SQLite full-text search. It runs OPTIMIZE against all of your FTS4 and FTS5 tables, then runs VACUUM. If you just want to run OPTIMIZE without the VACUUM, use the ``--no-vacuum`` flag. .. code-block:: bash # Optimize all FTS tables and then VACUUM sqlite-utils optimize mydb.db # Optimize but skip the VACUUM sqlite-utils optimize --no-vacuum mydb.db To optimize specific tables rather than every FTS table, pass those tables as extra arguments: .. code-block:: bash sqlite-utils optimize mydb.db table_1 table_2 .. _cli_wal: WAL mode ======== You can enable `Write-Ahead Logging `__ for a database file using the ``enable-wal`` command: .. code-block:: bash sqlite-utils enable-wal mydb.db You can disable WAL mode using ``disable-wal``: .. code-block:: bash sqlite-utils disable-wal mydb.db Both of these commands accept one or more database files as arguments. .. _cli_dump: Dumping the database to SQL =========================== The ``dump`` command outputs a SQL dump of the schema and full contents of the specified database file: .. code-block:: bash sqlite-utils dump mydb.db BEGIN TRANSACTION; CREATE TABLE ... ... COMMIT; .. _cli_load_extension: Loading SQLite extensions ========================= Many of these commands have the ability to load additional SQLite extensions using the ``--load-extension=/path/to/extension`` option - use ``--help`` to check for support, e.g. ``sqlite-utils rows --help``. This option can be applied multiple times to load multiple extensions. Since `SpatiaLite `__ is commonly used with SQLite, the value ``spatialite`` is special: it will search for SpatiaLite in the most common installation locations, saving you from needing to remember exactly where that module is located: .. code-block:: bash sqlite-utils memory "select spatialite_version()" --load-extension=spatialite .. code-block:: output [{"spatialite_version()": "4.3.0a"}] .. _cli_spatialite: SpatiaLite helpers ================== `SpatiaLite `_ adds geographic capability to SQLite (similar to how PostGIS builds on PostgreSQL). The `SpatiaLite cookbook `__ is a good resource for learning what's possible with it. To convert an existing table to a geographic table by adding a geometry column, use the ``sqlite-utils add-geometry-column`` command: .. code-block:: bash sqlite-utils add-geometry-column spatial.db locations geometry --type POLYGON --srid 4326 The table (``locations`` in the example above) must already exist before adding a geometry column. Use ``sqlite-utils create-table`` first, then ``add-geometry-column``. Use the ``--type`` option to specify a geometry type. By default, ``add-geometry-column`` uses a generic ``GEOMETRY``, which will work with any type, though it may not be supported by some desktop GIS applications. Eight (case-insensitive) types are allowed: * POINT * LINESTRING * POLYGON * MULTIPOINT * MULTILINESTRING * MULTIPOLYGON * GEOMETRYCOLLECTION * GEOMETRY .. _cli_spatialite_indexes: Adding spatial indexes ---------------------- Once you have a geometry column, you can speed up bounding box queries by adding a spatial index: .. code-block:: bash sqlite-utils create-spatial-index spatial.db locations geometry See this `SpatiaLite Cookbook recipe `__ for examples of how to use a spatial index. ..
_cli_install: Installing packages =================== The :ref:`convert command <cli_convert>` and the :ref:`insert -\\-convert <cli_insert_convert>` and :ref:`query -\\-functions ` options can be provided with a Python script that imports additional modules from the ``sqlite-utils`` environment. You can install packages from PyPI directly into the correct environment using ``sqlite-utils install <package>``. This is a wrapper around ``pip install``. .. code-block:: bash sqlite-utils install beautifulsoup4 Use ``-U`` to upgrade an existing package. .. _cli_uninstall: Uninstalling packages ===================== You can uninstall packages that were installed using ``sqlite-utils install`` with ``sqlite-utils uninstall <package>``: .. code-block:: bash sqlite-utils uninstall beautifulsoup4 Use ``-y`` to skip the request for confirmation. .. _cli_tui: Experimental TUI ================ A TUI is a "text user interface" (or "terminal user interface") - a keyboard and mouse driven graphical interface running in your terminal. ``sqlite-utils`` has experimental support for a TUI for building command-line invocations, built on top of the `Trogon `__ TUI library. To enable this feature you will need to install the ``trogon`` dependency. You can do that like so: .. code-block:: bash sqlite-utils install trogon Once installed, running the ``sqlite-utils tui`` command will launch the TUI interface: .. code-block:: bash sqlite-utils tui You can then construct a command by selecting options from the menus, and execute it using ``Ctrl+R``. .. image:: _static/img/tui.png :alt: A TUI interface for sqlite-utils - the left column shows a list of commands, while the right panel has a form for constructing arguments to the add-column command. sqlite-utils-3.35.2/docs/codespell-ignore-words.txt000066400000000000000000000000051452131415600223200ustar00rootroot00000000000000doub sqlite-utils-3.35.2/docs/conf.py000066400000000000000000000130621452131415600164760ustar00rootroot00000000000000#!/usr/bin/env python3 # -*- coding: utf-8 -*- from subprocess import Popen, PIPE from beanbag_docutils.sphinx.ext.github import github_linkcode_resolve # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # # import os # import sys # sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ "sphinx.ext.extlinks", "sphinx.ext.autodoc", "sphinx_copybutton", "sphinx.ext.linkcode", ] autodoc_member_order = "bysource" autodoc_typehints = "description" extlinks = { "issue": ("https://github.com/simonw/sqlite-utils/issues/%s", "#%s"), } def linkcode_resolve(domain, info): return github_linkcode_resolve( domain=domain, info=info, allowed_module_names=["sqlite_utils"], github_org_id="simonw", github_repo_id="sqlite-utils", branch="main", ) # Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # # source_suffix = ['.rst', '.md'] source_suffix = ".rst" # The master toctree document. master_doc = "index" # General information about the project. project = "sqlite-utils" copyright = "2018-2022, Simon Willison" author = "Simon Willison" # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. pipe = Popen("git describe --tags --always", stdout=PIPE, shell=True) git_version = pipe.stdout.read().decode("utf8") if git_version: version = git_version.rsplit("-", 1)[0] release = git_version else: version = "" release = "" # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = "en" # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This patterns also effect to html_static_path and html_extra_path exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"] # The name of the Pygments (syntax highlighting) style to use. pygments_style = "sphinx" # Only syntax highlight of code-block is used: highlight_language = "none" # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = "furo" html_title = "sqlite-utils" # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # # html_theme_options = {} # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ["_static"] html_js_files = ["js/custom.js"] # -- Options for HTMLHelp output ------------------------------------------ # Output file base name for HTML help builder. htmlhelp_basename = "sqlite-utils-doc" # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # # 'preamble': '', # Latex figure (float) alignment # # 'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ( master_doc, "sqlite-utils.tex", "sqlite-utils documentation", "Simon Willison", "manual", ) ] # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [(master_doc, "sqlite-utils", "sqlite-utils documentation", [author], 1)] # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. 
List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ( master_doc, "sqlite-utils", "sqlite-utils documentation", author, "sqlite-utils", "Python library for manipulating SQLite databases", "Miscellaneous", ) ] sqlite-utils-3.35.2/docs/contributing.rst000066400000000000000000000124771452131415600204510ustar00rootroot00000000000000.. _contributing: ============== Contributing ============== Development of ``sqlite-utils`` takes place in the `sqlite-utils GitHub repository `__. All improvements to the software should start with an issue. Read `How I build a feature `__ for a detailed description of the recommended process for building bug fixes or enhancements. .. _contributing_checkout: Obtaining the code ================== To work on this library locally, first checkout the code. Then create a new virtual environment:: git clone git@github.com:simonw/sqlite-utils cd sqlite-utils python3 -mvenv venv source venv/bin/activate Or if you are using ``pipenv``:: pipenv shell Within the virtual environment running ``sqlite-utils`` should run your locally editable version of the tool. You can use ``which sqlite-utils`` to confirm that you are running the version that lives in your virtual environment. .. _contributing_tests: Running the tests ================= To install the dependencies and test dependencies:: pip install -e '.[test]' To run the tests:: pytest .. _contributing_docs: Building the documentation ========================== To build the documentation, first install the documentation dependencies:: pip install -e '.[docs]' Then run ``make livehtml`` from the ``docs/`` directory to start a server on port 8000 that will serve the documentation and live-reload any time you make an edit to a ``.rst`` file:: cd docs make livehtml The `cog `__ tool is used to maintain portions of the documentation. You can run it like so:: cog -r docs/*.rst .. _contributing_linting: Linting and formatting ====================== ``sqlite-utils`` uses `Black `__ for code formatting, and `flake8 `__ and `mypy `__ for linting and type checking. Black is installed as part of ``pip install -e '.[test]'`` - you can then format your code by running it in the root of the project:: black . To install ``mypy`` and ``flake8`` run the following:: pip install -e '.[flake8,mypy]' Both commands can then be run in the root of the project like this:: flake8 mypy sqlite_utils All three of these tools are run by our CI mechanism against every commit and pull request. .. _contributing_just: Using Just and pipenv ===================== If you install `Just `__ and `pipenv `__ you can use them to manage your local development environment. To create a virtual environment and install all development dependencies, run:: cd sqlite-utils just init To run all of the tests and linters:: just To run tests, or run a specific test module or test by name:: just test # All tests just test tests/test_cli_memory.py # Just this module just test -k test_memory_no_detect_types # Just this test To run just the linters:: just lint To apply Black to your code:: just black To update documentation using Cog:: just cog To run the live documentation server (this will run Cog first):: just docs And to list all available commands:: just -l .. _release_process: Release process =============== Releases are performed using tags. When a new release is published on GitHub, a `GitHub Actions workflow `__ will perform the following: * Run the unit tests against all supported Python versions. 
If the tests pass... * Build a wheel bundle of the underlying Python source code * Push that new wheel up to PyPI: https://pypi.org/project/sqlite-utils/ To deploy new releases you will need to have push access to the GitHub repository. ``sqlite-utils`` follows `Semantic Versioning `__:: major.minor.patch We increment ``major`` for backwards-incompatible releases. We increment ``minor`` for new features. We increment ``patch`` for bugfix releases. To release a new version, first create a commit that updates the version number in ``setup.py`` and the :ref:`the changelog ` with highlights of the new version. An example `commit can be seen here `__:: # Update changelog git commit -m " Release 3.29 Refs #423, #458, #467, #469, #470, #471, #472, #475" -a git push Referencing the issues that are part of the release in the commit message ensures the name of the release shows up on those issue pages, e.g. `here `__. You can generate the list of issue references for a specific release by copying and pasting text from the release notes or GitHub changes-since-last-release view into this `Extract issue numbers from pasted text `__ tool. To create the tag for the release, create `a new release `__ on GitHub matching the new version number. You can convert the release notes to Markdown by copying and pasting the rendered HTML into this `Paste to Markdown tool `__. sqlite-utils-3.35.2/docs/index.rst000066400000000000000000000032651452131415600170430ustar00rootroot00000000000000======================= sqlite-utils |version| ======================= |PyPI| |Changelog| |CI| |License| |discord| .. |PyPI| image:: https://img.shields.io/pypi/v/sqlite-utils.svg :target: https://pypi.org/project/sqlite-utils/ .. |Changelog| image:: https://img.shields.io/github/v/release/simonw/sqlite-utils?include_prereleases&label=changelog :target: https://sqlite-utils.datasette.io/en/stable/changelog.html .. |CI| image:: https://github.com/simonw/sqlite-utils/workflows/Test/badge.svg :target: https://github.com/simonw/sqlite-utils/actions .. |License| image:: https://img.shields.io/badge/license-Apache%202.0-blue.svg :target: https://github.com/simonw/sqlite-utils/blob/main/LICENSE .. |discord| image:: https://img.shields.io/discord/823971286308356157?label=discord :target: https://discord.gg/Ass7bCAMDw *CLI tool and Python library for manipulating SQLite databases* This library and command-line utility helps create SQLite databases from an existing collection of data. Most of the functionality is available as either a Python API or through the ``sqlite-utils`` command-line tool. sqlite-utils is not intended to be a full ORM: the focus is utility helpers to make creating the initial database and populating it with data as productive as possible. It is designed as a useful complement to `Datasette `_. `Cleaning data with sqlite-utils and Datasette `_ provides a tutorial introduction (and accompanying ten minute video) about using this tool. Contents -------- .. toctree:: :maxdepth: 3 installation cli python-api plugins reference cli-reference contributing changelog sqlite-utils-3.35.2/docs/installation.rst000066400000000000000000000060001452131415600204250ustar00rootroot00000000000000.. _installation: ============== Installation ============== ``sqlite-utils`` is tested on Linux, macOS and Windows. ..
_installation_homebrew: Using Homebrew ============== The :ref:`sqlite-utils command-line tool ` can be installed on macOS using Homebrew:: brew install sqlite-utils If you have it installed and want to upgrade to the most recent release, you can run:: brew upgrade sqlite-utils Then run ``sqlite-utils --version`` to confirm the installed version. .. _installation_pip: Using pip ========= The `sqlite-utils package `__ on PyPI includes both the :ref:`sqlite_utils Python library ` and the ``sqlite-utils`` command-line tool. You can install them using ``pip`` like so:: pip install sqlite-utils .. _installation_pipx: Using pipx ========== `pipx `__ is a tool for installing Python command-line applications in their own isolated environments. You can use ``pipx`` to install the ``sqlite-utils`` command-line tool like this:: pipx install sqlite-utils .. _installation_sqlite3_alternatives: Alternatives to sqlite3 ======================= By default, ``sqlite-utils`` uses the ``sqlite3`` package bundled with the Python standard library. Depending on your operating system, this may come with some limitations. On some platforms the ability to load additional extensions (via ``conn.load_extension(...)`` or ``--load-extension=/path/to/extension``) may be disabled. You may also see the error ``sqlite3.OperationalError: table sqlite_master may not be modified`` when trying to alter an existing table. You can work around these limitations by installing either the `pysqlite3 `__ package or the `sqlean.py `__ package, both of which provide drop-in replacements for the standard library ``sqlite3`` module but with a recent version of SQLite and full support for loading extensions. To install ``sqlean.py`` (which has compiled binary wheels available for all major platforms) run the following: .. code-block:: bash sqlite-utils install sqlean.py ``pysqlite3`` and ``sqlean.py`` do not provide implementations of the ``.iterdump()`` method. To use that method (see :ref:`python_api_itedump`) or the ``sqlite-utils dump`` command you should also install the ``sqlite-dump`` package: .. code-block:: bash sqlite-utils install sqlite-dump .. _installation_completion: Setting up shell completion =========================== You can configure shell tab completion for the ``sqlite-utils`` command using these commands. For ``bash``: .. code-block:: bash eval "$(_SQLITE_UTILS_COMPLETE=bash_source sqlite-utils)" For ``zsh``: .. code-block:: zsh eval "$(_SQLITE_UTILS_COMPLETE=zsh_source sqlite-utils)" Add this code to ``~/.zshrc`` or ``~/.bashrc`` to automatically run it when you start a new shell. See `the Click documentation `__ for more details.sqlite-utils-3.35.2/docs/plugins.rst000066400000000000000000000072331452131415600174150ustar00rootroot00000000000000.. _plugins: ========= Plugins ========= ``sqlite-utils`` supports plugins, which can be used to add extra features to the software. Plugins can add new commands, for example ``sqlite-utils some-command ...`` Plugins can be installed using the ``sqlite-utils install`` command: .. code-block:: bash sqlite-utils install sqlite-utils-name-of-plugin You can see a JSON list of plugins that have been installed by running this: .. code-block:: bash sqlite-utils plugins Plugin hooks such as :ref:`plugins_hooks_prepare_connection` affect each instance of the ``Database`` class. You can opt-out of these plugins by creating that class instance like so: .. code-block:: python db = Database(memory=True, execute_plugins=False) .. 
_plugins_building: Building a plugin ----------------- Plugins are created in a directory named after the plugin. To create a "hello world" plugin, first create a ``hello-world`` directory: .. code-block:: bash mkdir hello-world cd hello-world In that folder create two files. The first is a ``pyproject.toml`` file describing the plugin: .. code-block:: toml [project] name = "sqlite-utils-hello-world" version = "0.1" [project.entry-points.sqlite_utils] hello_world = "sqlite_utils_hello_world" The ```[project.entry-points.sqlite_utils]`` section tells ``sqlite-tils`` which module to load when executing the plugin. Then create ``sqlite_utils_hello_world.py`` with the following content: .. code-block:: python import click import sqlite_utils @sqlite_utils.hookimpl def register_commands(cli): @cli.command() def hello_world(): "Say hello world" click.echo("Hello world!") Install the plugin in "editable" mode - so you can make changes to the code and have them picked up instantly by ``sqlite-utils`` - like this: .. code-block:: bash sqlite-utils install -e . Or pass the path to your plugin directory: .. code-block:: bash sqlite-utils install -e `/dev/sqlite-utils-hello-world Now, running this should execute your new command: .. code-block:: bash sqlite-utils hello-world Your command will also be listed in the output of ``sqlite-utils --help``. See the `LLM plugin documentation `__ for tips on distributing your plugin. .. _plugins_hooks: Plugin hooks ------------ Plugin hooks allow ``sqlite-utils`` to be customized. .. _plugins_hooks_register_commands: register_commands(cli) ~~~~~~~~~~~~~~~~~~~~~~ This hook can be used to register additional commands with the ``sqlite-utils`` CLI. It is called with the ``cli`` object, which is a ``click.Group`` instance. Example implementation: .. code-block:: python import click import sqlite_utils @sqlite_utils.hookimpl def register_commands(cli): @cli.command() def hello_world(): "Say hello world" click.echo("Hello world!") .. _plugins_hooks_prepare_connection: prepare_connection(conn) ~~~~~~~~~~~~~~~~~~~~~~~~ This hook is called when a new SQLite database connection is created. You can use it to `register custom SQL functions `_, aggregates and collations. For example: .. code-block:: python import sqlite_utils @sqlite_utils.hookimpl def prepare_connection(conn): conn.create_function( "hello", 1, lambda name: f"Hello, {name}!" ) This registers a SQL function called ``hello`` which takes a single argument and can be called like this: .. code-block:: sql select hello("world"); -- "Hello, world!" sqlite-utils-3.35.2/docs/python-api.rst000066400000000000000000003035501452131415600200250ustar00rootroot00000000000000.. _python_api: ============================= sqlite_utils Python library ============================= .. contents:: :local: :class: this-will-duplicate-information-and-it-is-still-useful-here .. _python_api_getting_started: Getting started =============== Here's how to create a new SQLite database file containing a new ``chickens`` table, populated with four records: .. code-block:: python from sqlite_utils import Database db = Database("chickens.db") db["chickens"].insert_all([{ "name": "Azi", "color": "blue", }, { "name": "Lila", "color": "blue", }, { "name": "Suna", "color": "gold", }, { "name": "Cardi", "color": "black", }]) You can loop through those rows like this: .. 
code-block:: python for row in db["chickens"].rows: print(row) Which outputs the following:: {'name': 'Azi', 'color': 'blue'} {'name': 'Lila', 'color': 'blue'} {'name': 'Suna', 'color': 'gold'} {'name': 'Cardi', 'color': 'black'} To run a SQL query, use :ref:`db.query() `: .. code-block:: python for row in db.query(""" select color, count(*) from chickens group by color order by count(*) desc """): print(row) Which outputs:: {'color': 'blue', 'count(*)': 2} {'color': 'gold', 'count(*)': 1} {'color': 'black', 'count(*)': 1} .. _python_api_connect: Connecting to or creating a database ==================================== Database objects are constructed by passing in either a path to a file on disk or an existing SQLite3 database connection: .. code-block:: python from sqlite_utils import Database db = Database("my_database.db") This will create ``my_database.db`` if it does not already exist. If you want to recreate a database from scratch (first removing the existing file from disk if it already exists) you can use the ``recreate=True`` argument: .. code-block:: python db = Database("my_database.db", recreate=True) Instead of a file path you can pass in an existing SQLite connection: .. code-block:: python import sqlite3 db = Database(sqlite3.connect("my_database.db")) If you want to create an in-memory database, you can do so like this: .. code-block:: python db = Database(memory=True) You can also create a named in-memory database. Unlike regular memory databases these can be accessed by multiple threads, provided at least one reference to the database still exists. `del db` will clear the database from memory. .. code-block:: python db = Database(memory_name="my_shared_database") Connections use ``PRAGMA recursive_triggers=on`` by default. If you don't want to use `recursive triggers `__ you can turn them off using: .. code-block:: python db = Database(memory=True, recursive_triggers=False) By default, any :ref:`sqlite-utils plugins ` that implement the :ref:`plugins_hooks_prepare_connection` hook will be executed against the connection when you create the ``Database`` object. You can opt out of executing plugins using ``execute_plugins=False`` like this: .. code-block:: python db = Database(memory=True, execute_plugins=False) .. _python_api_attach: Attaching additional databases ------------------------------ SQLite supports cross-database SQL queries, which can join data from tables in more than one database file. You can attach an additional database using the ``.attach()`` method, providing an alias to use for that database and the path to the SQLite file on disk. .. code-block:: python db = Database("first.db") db.attach("second", "second.db") # Now you can run queries like this one: print(db.query(""" select * from table_in_first union all select * from second.table_in_second """)) You can reference tables in the attached database using the alias value you passed to ``db.attach(alias, filepath)`` as a prefix, for example the ``second.table_in_second`` reference in the SQL query above. .. _python_api_tracing: Tracing queries --------------- You can use the ``tracer`` mechanism to see SQL queries that are being executed by SQLite. A tracer is a function that you provide which will be called with ``sql`` and ``params`` arguments every time SQL is executed, for example: .. code-block:: python def tracer(sql, params): print("SQL: {} - params: {}".format(sql, params)) You can pass this function to the ``Database()`` constructor like so: .. 
code-block:: python db = Database(memory=True, tracer=tracer) You can also turn on a tracer function temporarily for a block of code using the ``with db.tracer(...)`` context manager: .. code-block:: python db = Database(memory=True) # ... later with db.tracer(print): db["dogs"].insert({"name": "Cleo"}) This example will print queries only for the duration of the ``with`` block. .. _python_api_executing_queries: Executing queries ================= The ``Database`` class offers several methods for directly executing SQL queries. .. _python_api_query: db.query(sql, params) --------------------- The ``db.query(sql)`` function executes a SQL query and returns an iterator over Python dictionaries representing the resulting rows: .. code-block:: python db = Database(memory=True) db["dogs"].insert_all([{"name": "Cleo"}, {"name": "Pancakes"}]) for row in db.query("select * from dogs"): print(row) # Outputs: # {'name': 'Cleo'} # {'name': 'Pancakes'} .. _python_api_execute: db.execute(sql, params) ----------------------- The ``db.execute()`` and ``db.executescript()`` methods provide wrappers around ``.execute()`` and ``.executescript()`` on the underlying SQLite connection. These wrappers log to the :ref:`tracer function ` if one has been registered. ``db.execute(sql)`` returns a `sqlite3.Cursor `__ that was used to execute the SQL. .. code-block:: python db = Database(memory=True) db["dogs"].insert({"name": "Cleo"}) cursor = db.execute("update dogs set name = 'Cleopaws'") print(cursor.rowcount) # Outputs the number of rows affected by the update # In this case 2 Other cursor methods such as ``.fetchone()`` and ``.fetchall()`` are also available, see the `standard library documentation `__. .. _python_api_parameters: Passing parameters ------------------ Both ``db.query()`` and ``db.execute()`` accept an optional second argument for parameters to be passed to the SQL query. This can take the form of either a tuple/list or a dictionary, depending on the type of parameters used in the query. Values passed in this way will be correctly quoted and escaped, helping avoid SQL injection vulnerabilities. ``?`` parameters in the SQL query can be filled in using a list: .. code-block:: python db.execute("update dogs set name = ?", ["Cleopaws"]) # This will rename ALL dogs to be called "Cleopaws" Named parameters using ``:name`` can be filled using a dictionary: .. code-block:: python dog = next(db.query( "select rowid, name from dogs where name = :name", {"name": "Cleopaws"} )) # dog is now {'rowid': 1, 'name': 'Cleopaws'} In this example ``next()`` is used to retrieve the first result in the iterator returned by the ``db.query()`` method. .. _python_api_table: Accessing tables ================ Tables are accessed using the indexing operator, like so: .. code-block:: python table = db["my_table"] If the table does not yet exist, it will be created the first time you attempt to insert or upsert data into it. You can also access tables using the ``.table()`` method like so: .. code-block:: python table = db.table("my_table") Using this factory function allows you to set :ref:`python_api_table_configuration`. .. _python_api_tables: Listing tables ============== You can list the names of tables in a database using the ``.table_names()`` method:: >>> db.table_names() ['dogs'] To see just the FTS4 tables, use ``.table_names(fts4=True)``. For FTS5, use ``.table_names(fts5=True)``. You can also iterate through the table objects themselves using the ``.tables`` property:: >>> db.tables [] .. 
_python_api_views: Listing views ============= ``.view_names()`` shows you a list of views in the database:: >>> db.view_names() ['good_dogs'] You can iterate through view objects using the ``.views`` property:: >>> db.views [] View objects are similar to Table objects, except that any attempts to insert or update data will throw an error. The full list of methods and properties available on a view object is as follows: * ``columns`` * ``columns_dict`` * ``count`` * ``schema`` * ``rows`` * ``rows_where(where, where_args, order_by, select)`` * ``drop()`` .. _python_api_rows: Listing rows ============ To iterate through dictionaries for each of the rows in a table, use ``.rows``:: >>> db = sqlite_utils.Database("dogs.db") >>> for row in db["dogs"].rows: ... print(row) {'id': 1, 'age': 4, 'name': 'Cleo'} {'id': 2, 'age': 2, 'name': 'Pancakes'} You can filter rows by a WHERE clause using ``.rows_where(where, where_args)``:: >>> db = sqlite_utils.Database("dogs.db") >>> for row in db["dogs"].rows_where("age > ?", [3]): ... print(row) {'id': 1, 'age': 4, 'name': 'Cleo'} The first argument is a fragment of SQL. The second, optional argument is values to be passed to that fragment - you can use ``?`` placeholders and pass an array, or you can use ``:named`` parameters and pass a dictionary, like this:: >>> for row in db["dogs"].rows_where("age > :age", {"age": 3}): ... print(row) {'id': 1, 'age': 4, 'name': 'Cleo'} To return custom columns (instead of the default that uses ``select *``) pass ``select="column1, column2"``:: >>> db = sqlite_utils.Database("dogs.db") >>> for row in db["dogs"].rows_where(select='name, age'): ... print(row) {'name': 'Cleo', 'age': 4} To specify an order, use the ``order_by=`` argument:: >>> for row in db["dogs"].rows_where("age > 1", order_by="age"): ... print(row) {'id': 2, 'age': 2, 'name': 'Pancakes'} {'id': 1, 'age': 4, 'name': 'Cleo'} You can use ``order_by="age desc"`` for descending order. You can order all records in the table by excluding the ``where`` argument:: >>> for row in db["dogs"].rows_where(order_by="age desc"): ... print(row) {'id': 1, 'age': 4, 'name': 'Cleo'} {'id': 2, 'age': 2, 'name': 'Pancakes'} This method also accepts ``offset=`` and ``limit=`` arguments, for specifying an OFFSET and a LIMIT for the SQL query:: >>> for row in db["dogs"].rows_where(order_by="age desc", limit=1): ... print(row) {'id': 1, 'age': 4, 'name': 'Cleo'} .. _python_api_rows_count_where: Counting rows ------------- To count the number of rows that would be returned by a where filter, use ``.count_where(where, where_args)``: >>> db["dogs"].count_where("age > ?", [1]) 2 .. _python_api_pks_and_rows_where: Listing rows with their primary keys ==================================== Sometimes it can be useful to retrieve the primary key along with each row, in order to pass that key (or primary key tuple) to the ``.get()`` or ``.update()`` methods. The ``.pks_and_rows_where()`` method takes the same signature as ``.rows_where()`` (with the exception of the ``select=`` parameter) but returns a generator that yields pairs of ``(primary key, row dictionary)``. The primary key value will usually be a single value but can also be a tuple if the table has a compound primary key. If the table is a ``rowid`` table (with no explicit primary key column) then that ID will be returned. :: >>> db = sqlite_utils.Database(memory=True) >>> db["dogs"].insert({"name": "Cleo"}) >>> for pk, row in db["dogs"].pks_and_rows_where(): ... 
print(pk, row) 1 {'rowid': 1, 'name': 'Cleo'} >>> db["dogs_with_pk"].insert({"id": 5, "name": "Cleo"}, pk="id") >>> for pk, row in db["dogs_with_pk"].pks_and_rows_where(): ... print(pk, row) 5 {'id': 5, 'name': 'Cleo'} >>> db["dogs_with_compound_pk"].insert( ... {"species": "dog", "id": 3, "name": "Cleo"}, ... pk=("species", "id") ... ) >>> for pk, row in db["dogs_with_compound_pk"].pks_and_rows_where(): ... print(pk, row) ('dog', 3) {'species': 'dog', 'id': 3, 'name': 'Cleo'} .. _python_api_get: Retrieving a specific record ============================ You can retrieve a record by its primary key using ``table.get()``:: >>> db = sqlite_utils.Database("dogs.db") >>> print(db["dogs"].get(1)) {'id': 1, 'age': 4, 'name': 'Cleo'} If the table has a compound primary key you can pass in the primary key values as a tuple:: >>> db["compound_dogs"].get(("mixed", 3)) If the record does not exist a ``NotFoundError`` will be raised: .. code-block:: python from sqlite_utils.db import NotFoundError try: row = db["dogs"].get(5) except NotFoundError: print("Dog not found") .. _python_api_schema: Showing the schema ================== The ``db.schema`` property returns the full SQL schema for the database as a string:: >>> db = sqlite_utils.Database("dogs.db") >>> print(db.schema) CREATE TABLE "dogs" ( [id] INTEGER PRIMARY KEY, [name] TEXT ); .. _python_api_creating_tables: Creating tables =============== The easiest way to create a new table is to insert a record into it: .. code-block:: python from sqlite_utils import Database import sqlite3 db = Database("dogs.db") dogs = db["dogs"] dogs.insert({ "name": "Cleo", "twitter": "cleopaws", "age": 3, "is_good_dog": True, }) This will automatically create a new table called "dogs" with the following schema:: CREATE TABLE dogs ( name TEXT, twitter TEXT, age INTEGER, is_good_dog INTEGER ) You can also specify a primary key by passing the ``pk=`` parameter to the ``.insert()`` call. This will only be obeyed if the record being inserted causes the table to be created: .. code-block:: python dogs.insert({ "id": 1, "name": "Cleo", "twitter": "cleopaws", "age": 3, "is_good_dog": True, }, pk="id") After inserting a row like this, the ``dogs.last_rowid`` property will return the SQLite ``rowid`` assigned to the most recently inserted record. The ``dogs.last_pk`` property will return the last inserted primary key value, if you specified one. This can be very useful when writing code that creates foreign keys or many-to-many relationships. .. _python_api_custom_columns: Custom column order and column types ------------------------------------ The order of the columns in the table will be derived from the order of the keys in the dictionary, provided you are using Python 3.6 or later. If you want to explicitly set the order of the columns you can do so using the ``column_order=`` parameter: .. code-block:: python db["dogs"].insert({ "id": 1, "name": "Cleo", "twitter": "cleopaws", "age": 3, "is_good_dog": True, }, pk="id", column_order=("id", "twitter", "name")) You don't need to pass all of the columns to the ``column_order`` parameter. If you only pass a subset of the columns the remaining columns will be ordered based on the key order of the dictionary. Column types are detected based on the example data provided. 
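A quick way to see what was detected is the ``columns_dict`` property, which maps column names to Python types. This is a minimal sketch, assuming the ``dogs`` table created by the example above:

.. code-block:: python

    print(db["dogs"].columns_dict)
    # Outputs something like:
    # {'id': <class 'int'>, 'twitter': <class 'str'>, 'name': <class 'str'>,
    #  'age': <class 'int'>, 'is_good_dog': <class 'int'>}
    # is_good_dog was created as an INTEGER column, so it is reported as int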
Sometimes you may find you need to over-ride these detected types - to create an integer column for data that was provided as a string for example, or to ensure that a table where the first example was ``None`` is created as an ``INTEGER`` rather than a ``TEXT`` column. You can do this using the ``columns=`` parameter: .. code-block:: python db["dogs"].insert({ "id": 1, "name": "Cleo", "age": "5", }, pk="id", columns={"age": int, "weight": float}) This will create a table with the following schema: .. code-block:: sql CREATE TABLE [dogs] ( [id] INTEGER PRIMARY KEY, [name] TEXT, [age] INTEGER, [weight] FLOAT ) .. _python_api_explicit_create: Explicitly creating a table --------------------------- You can directly create a new table without inserting any data into it using the ``.create()`` method: .. code-block:: python db["cats"].create({ "id": int, "name": str, "weight": float, }, pk="id") The first argument here is a dictionary specifying the columns you would like to create. Each column is paired with a Python type indicating the type of column. See :ref:`python_api_add_column` for full details on how these types work. This method takes optional arguments ``pk=``, ``column_order=``, ``foreign_keys=``, ``not_null=set()`` and ``defaults=dict()`` - explained below. A ``sqlite_utils.utils.sqlite3.OperationalError`` will be raised if a table of that name already exists. You can pass ``ignore=True`` to ignore that error. You can also use ``if_not_exists=True`` to use the SQL ``CREATE TABLE IF NOT EXISTS`` pattern to achieve the same effect: .. code-block:: python db["cats"].create({ "id": int, "name": str, }, pk="id", if_not_exists=True) To drop and replace any existing table of that name, pass ``replace=True``. This is a **dangerous operation** that will result in loss of existing data in the table. You can also pass ``transform=True`` to have any existing tables :ref:`transformed ` to match your new table specification. This is a **dangerous operation** as it will drop columns that are no longer listed in your call to ``.create()``, so be careful when running this. .. code-block:: python db["cats"].create({ "id": int, "name": str, "weight": float, }, pk="id", transform=True) The ``transform=True`` option will update the table schema if any of the following have changed: - The specified columns or their types - The specified primary key - The order of the columns, defined using ``column_order=`` - The ``not_null=`` or ``defaults=`` arguments Changes to ``foreign_keys=`` are not currently detected and applied by ``transform=True``. .. _python_api_compound_primary_keys: Compound primary keys --------------------- If you want to create a table with a compound primary key that spans multiple columns, you can do so by passing a tuple of column names to any of the methods that accept a ``pk=`` parameter. For example: .. code-block:: python db["cats"].create({ "id": int, "breed": str, "name": str, "weight": float, }, pk=("breed", "id")) This also works for the ``.insert()``, ``.insert_all()``, ``.upsert()`` and ``.upsert_all()`` methods. .. _python_api_foreign_keys: Specifying foreign keys ----------------------- Any operation that can create a table (``.create()``, ``.insert()``, ``.insert_all()``, ``.upsert()`` and ``.upsert_all()``) accepts an optional ``foreign_keys=`` argument which can be used to set up foreign key constraints for the table that is being created. 
If you are using your database with `Datasette `__, Datasette will detect these constraints and use them to generate hyperlinks to associated records. The ``foreign_keys`` argument takes a list that indicates which foreign keys should be created. The list can take several forms. The simplest is a list of columns: .. code-block:: python foreign_keys=["author_id"] The library will guess which tables you wish to reference based on the column names using the rules described in :ref:`python_api_add_foreign_key`. You can also be more explicit, by passing in a list of tuples: .. code-block:: python foreign_keys=[ ("author_id", "authors", "id") ] This means that the ``author_id`` column should be a foreign key that references the ``id`` column in the ``authors`` table. You can leave off the third item in the tuple to have the referenced column automatically set to the primary key of that table. A full example: .. code-block:: python db["authors"].insert_all([ {"id": 1, "name": "Sally"}, {"id": 2, "name": "Asheesh"} ], pk="id") db["books"].insert_all([ {"title": "Hedgehogs of the world", "author_id": 1}, {"title": "How to train your wolf", "author_id": 2}, ], foreign_keys=[ ("author_id", "authors") ]) .. _python_api_table_configuration: Table configuration options --------------------------- The ``.insert()``, ``.upsert()``, ``.insert_all()`` and ``.upsert_all()`` methods each take a number of keyword arguments, some of which influence what happens should they cause a table to be created and some of which affect the behavior of those methods. You can set default values for these methods by accessing the table through the ``db.table(...)`` method (instead of using ``db["table_name"]``), like so: .. code-block:: python table = db.table( "authors", pk="id", not_null={"name", "score"}, column_order=("id", "name", "score", "url") ) # Now you can call .insert() like so: table.insert({"id": 1, "name": "Tracy", "score": 5}) The configuration options that can be specified in this way are ``pk``, ``foreign_keys``, ``column_order``, ``not_null``, ``defaults``, ``batch_size``, ``hash_id``, ``hash_id_columns``, ``alter``, ``ignore``, ``replace``, ``extracts``, ``conversions``, ``columns``. These are all documented below. .. _python_api_defaults_not_null: Setting defaults and not null constraints ----------------------------------------- Each of the methods that can cause a table to be created take optional arguments ``not_null=set()`` and ``defaults=dict()``. The methods that take these optional arguments are: * ``db.create_table(...)`` * ``table.create(...)`` * ``table.insert(...)`` * ``table.insert_all(...)`` * ``table.upsert(...)`` * ``table.upsert_all(...)`` You can use ``not_null=`` to pass a set of column names that should have a ``NOT NULL`` constraint set on them when they are created. You can use ``defaults=`` to pass a dictionary mapping columns to the default value that should be specified in the ``CREATE TABLE`` statement. Here's an example that uses these features: .. code-block:: python db["authors"].insert_all( [{"id": 1, "name": "Sally", "score": 2}], pk="id", not_null={"name", "score"}, defaults={"score": 1}, ) db["authors"].insert({"name": "Dharma"}) list(db["authors"].rows) # Outputs: # [{'id': 1, 'name': 'Sally', 'score': 2}, # {'id': 3, 'name': 'Dharma', 'score': 1}] print(db["authors"].schema) # Outputs: # CREATE TABLE [authors] ( # [id] INTEGER PRIMARY KEY, # [name] TEXT NOT NULL, # [score] INTEGER NOT NULL DEFAULT 1 # ) .. 
_python_api_rename_table: Renaming a table ================ The ``db.rename_table(old_name, new_name)`` method can be used to rename a table: .. code-block:: python db.rename_table("my_table", "new_name_for_my_table") This executes the following SQL: .. code-block:: sql ALTER TABLE [my_table] RENAME TO [new_name_for_my_table] .. _python_api_duplicate: Duplicating tables ================== The ``table.duplicate()`` method creates a copy of the table, copying both the table schema and all of the rows in that table: .. code-block:: python db["authors"].duplicate("authors_copy") The new ``authors_copy`` table will now contain a duplicate copy of the data from ``authors``. This method raises ``sqlite_utils.db.NoTable`` if the table does not exist. .. _python_api_bulk_inserts: Bulk inserts ============ If you have more than one record to insert, the ``insert_all()`` method is a much more efficient way of inserting them. Just like ``insert()`` it will automatically detect the columns that should be created, but it will inspect the first batch of 100 items to help decide what those column types should be. Use it like this: .. code-block:: python db["dogs"].insert_all([{ "id": 1, "name": "Cleo", "twitter": "cleopaws", "age": 3, "is_good_dog": True, }, { "id": 2, "name": "Marnie", "twitter": "MarnieTheDog", "age": 16, "is_good_dog": True, }], pk="id", column_order=("id", "twitter", "name")) The column types used in the ``CREATE TABLE`` statement are automatically derived from the types of data in that first batch of rows. Any additional columns in subsequent batches will cause a ``sqlite3.OperationalError`` exception to be raised unless the ``alter=True`` argument is supplied, in which case the new columns will be created. The function can accept an iterator or generator of rows and will commit them according to the batch size. The default batch size is 100, but you can specify a different size using the ``batch_size`` parameter: .. code-block:: python db["big_table"].insert_all(({ "id": 1, "name": "Name {}".format(i), } for i in range(10000)), batch_size=1000) You can skip inserting any records that have a primary key that already exists using ``ignore=True``. This works with both ``.insert({...}, ignore=True)`` and ``.insert_all([...], ignore=True)``. You can delete all the existing rows in the table before inserting the new records using ``truncate=True``. This is useful if you want to replace the data in the table. Pass ``analyze=True`` to run ``ANALYZE`` against the table after inserting the new records. .. _python_api_insert_replace: Insert-replacing data ===================== If you try to insert data using a primary key that already exists, the ``.insert()`` or ``.insert_all()`` method will raise a ``sqlite3.IntegrityError`` exception. This example that catches that exception: .. code-block:: python from sqlite_utils.utils import sqlite3 try: db["dogs"].insert({"id": 1, "name": "Cleo"}, pk="id") except sqlite3.IntegrityError: print("Record already exists with that primary key") Importing from ``sqlite_utils.utils.sqlite3`` ensures your code continues to work even if you are using the ``pysqlite3`` library instead of the Python standard library ``sqlite3`` module. Use the ``ignore=True`` parameter to ignore this error: .. 
code-block:: python # This fails silently if a record with id=1 already exists db["dogs"].insert({"id": 1, "name": "Cleo"}, pk="id", ignore=True) To replace any existing records that have a matching primary key, use the ``replace=True`` parameter to ``.insert()`` or ``.insert_all()``: .. code-block:: python db["dogs"].insert_all([{ "id": 1, "name": "Cleo", "twitter": "cleopaws", "age": 3, "is_good_dog": True, }, { "id": 2, "name": "Marnie", "twitter": "MarnieTheDog", "age": 16, "is_good_dog": True, }], pk="id", replace=True) .. note:: Prior to sqlite-utils 2.0 the ``.upsert()`` and ``.upsert_all()`` methods worked the same way as ``.insert(replace=True)`` does today. See :ref:`python_api_upsert` for the new behaviour of those methods introduced in 2.0. .. _python_api_update: Updating a specific record ========================== You can update a record by its primary key using ``table.update()``:: >>> db = sqlite_utils.Database("dogs.db") >>> print(db["dogs"].get(1)) {'id': 1, 'age': 4, 'name': 'Cleo'} >>> db["dogs"].update(1, {"age": 5}) >>> print(db["dogs"].get(1)) {'id': 1, 'age': 5, 'name': 'Cleo'} The first argument to ``update()`` is the primary key. This can be a single value, or a tuple if that table has a compound primary key:: >>> db["compound_dogs"].update((5, 3), {"name": "Updated"}) The second argument is a dictionary of columns that should be updated, along with their new values. You can cause any missing columns to be added automatically using ``alter=True``:: >>> db["dogs"].update(1, {"breed": "Mutt"}, alter=True) .. _python_api_delete: Deleting a specific record ========================== You can delete a record using ``table.delete()``:: >>> db = sqlite_utils.Database("dogs.db") >>> db["dogs"].delete(1) The ``delete()`` method takes the primary key of the record. This can be a tuple of values if the row has a compound primary key:: >>> db["compound_dogs"].delete((5, 3)) .. _python_api_delete_where: Deleting multiple records ========================= You can delete all records in a table that match a specific WHERE statement using ``table.delete_where()``:: >>> db = sqlite_utils.Database("dogs.db") >>> # Delete every dog with age less than 3 >>> db["dogs"].delete_where("age < ?", [3]) Calling ``table.delete_where()`` with no other arguments will delete every row in the table. Pass ``analyze=True`` to run ``ANALYZE`` against the table after deleting the rows. .. _python_api_upsert: Upserting data ============== Upserting allows you to insert records if they do not exist and update them if they DO exist, based on matching against their primary key. For example, given the dogs database you could upsert the record for Cleo like so: .. code-block:: python db["dogs"].upsert({ "id": 1, "name": "Cleo", "twitter": "cleopaws", "age": 4, "is_good_dog": True, }, pk="id", column_order=("id", "twitter", "name")) If a record exists with id=1, it will be updated to match those fields. If it does not exist it will be created. Any existing columns that are not referenced in the dictionary passed to ``.upsert()`` will be unchanged. If you want to replace a record entirely, use ``.insert(doc, replace=True)`` instead. Note that the ``pk`` and ``column_order`` parameters here are optional if you are certain that the table has already been created. You should pass them if the table may not exist at the time the first upsert is performed. An ``upsert_all()`` method is also available, which behaves like ``insert_all()`` but performs upserts instead. .. 
note:: ``.upsert()`` and ``.upsert_all()`` in sqlite-utils 1.x worked like ``.insert(..., replace=True)`` and ``.insert_all(..., replace=True)`` do in 2.x. See `issue #66 `__ for details of this change. .. _python_api_convert: Converting data in columns ========================== The ``table.convert(...)`` method can be used to apply a conversion function to the values in a column, either to update that column or to populate new columns. It is the Python library equivalent of the :ref:`sqlite-utils convert ` command. This feature works by registering a custom SQLite function that applies a Python transformation, then running a SQL query equivalent to ``UPDATE table SET column = convert_value(column);`` To transform a specific column to uppercase, you would use the following: .. code-block:: python db["dogs"].convert("name", lambda value: value.upper()) You can pass a list of columns, in which case the transformation will be applied to each one: .. code-block:: python db["dogs"].convert(["name", "twitter"], lambda value: value.upper()) To save the output to of the transformation to a different column, use the ``output=`` parameter: .. code-block:: python db["dogs"].convert("name", lambda value: value.upper(), output="name_upper") This will add the new column, if it does not already exist. You can pass ``output_type=int`` or some other type to control the type of the new column - otherwise it will default to text. If you want to drop the original column after saving the results in a separate output column, pass ``drop=True``. By default any rows with a falsey value for the column - such as ``0`` or ``None`` - will be skipped. Pass ``skip_false=False`` to disable this behaviour. You can create multiple new columns from a single input column by passing ``multi=True`` and a conversion function that returns a Python dictionary. This example creates new ``upper`` and ``lower`` columns populated from the single ``title`` column: .. code-block:: python table.convert( "title", lambda v: {"upper": v.upper(), "lower": v.lower()}, multi=True ) The ``.convert()`` method accepts optional ``where=`` and ``where_args=`` parameters which can be used to apply the conversion to a subset of rows specified by a where clause. Here's how to apply the conversion only to rows with an ``id`` that is higher than 20: .. code-block:: python table.convert("title", lambda v: v.upper(), where="id > :id", where_args={"id": 20}) These behave the same as the corresponding parameters to the :ref:`.rows_where() ` method, so you can use ``?`` placeholders and a list of values instead of ``:named`` placeholders with a dictionary. .. _python_api_lookup_tables: Working with lookup tables ========================== A useful pattern when populating large tables in to break common values out into lookup tables. Consider a table of ``Trees``, where each tree has a species. Ideally these species would be split out into a separate ``Species`` table, with each one assigned an integer primary key that can be referenced from the ``Trees`` table ``species_id`` column. .. _python_api_explicit_lookup_tables: Creating lookup tables explicitly --------------------------------- Calling ``db["Species"].lookup({"name": "Palm"})`` creates a table called ``Species`` (if one does not already exist) with two columns: ``id`` and ``name``. It sets up a unique constraint on the ``name`` column to guarantee it will not contain duplicate rows. It then inserts a new row with the ``name`` set to ``Palm`` and returns the new integer primary key value. 
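For example, here is a minimal sketch of that behaviour using an in-memory database, so the ``Species`` table does not exist yet:

.. code-block:: python

    from sqlite_utils import Database

    db = Database(memory=True)

    # Creates the Species table with id and name columns, adds a
    # unique constraint on name, inserts the row and returns the
    # new integer primary key:
    palm_id = db["Species"].lookup({"name": "Palm"})

    # Calling it again with the same name returns the same primary
    # key rather than inserting a duplicate row:
    assert db["Species"].lookup({"name": "Palm"}) == palm_id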
If the ``Species`` table already exists, it will insert the new row and return the primary key. If a row with that ``name`` already exists, it will return the corresponding primary key value directly. If you call ``.lookup()`` against an existing table without the unique constraint it will attempt to add the constraint, raising an ``IntegrityError`` if the constraint cannot be created. If you pass in a dictionary with multiple values, both values will be used to insert or retrieve the corresponding ID and any unique constraint that is created will cover all of those columns, for example: .. code-block:: python db["Trees"].insert({ "latitude": 49.1265976, "longitude": 2.5496218, "species": db["Species"].lookup({ "common_name": "Common Juniper", "latin_name": "Juniperus communis" }) }) The ``.lookup()`` method has an optional second argument which can be used to populate other columns in the table but only if the row does not exist yet. These columns will not be included in the unique index. To create a species record with a note on when it was first seen, you can use this: .. code-block:: python db["Species"].lookup({"name": "Palm"}, {"first_seen": "2021-03-04"}) The first time this is called the record will be created for ``name="Palm"``. Any subsequent calls with that name will ignore the second argument, even if it includes different values. ``.lookup()`` also accepts keyword arguments, which are passed through to the :ref:`insert() method ` and can be used to influence the shape of the created table. Supported parameters are: - ``pk`` - which defaults to ``id`` - ``foreign_keys`` - ``column_order`` - ``not_null`` - ``defaults`` - ``extracts`` - ``conversions`` - ``columns`` .. _python_api_extracts: Populating lookup tables automatically during insert/upsert ----------------------------------------------------------- A more efficient way to work with lookup tables is to define them using the ``extracts=`` parameter, which is accepted by ``.insert()``, ``.upsert()``, ``.insert_all()``, ``.upsert_all()`` and by the ``.table(...)`` factory function. ``extracts=`` specifies columns which should be "extracted" out into a separate lookup table during the data insertion. It can be either a list of column names, in which case the extracted table names will match the column names exactly, or it can be a dictionary mapping column names to the desired name of the extracted table. To extract the ``species`` column out to a separate ``Species`` table, you can do this: .. code-block:: python # Using the table factory trees = db.table("Trees", extracts={"species": "Species"}) trees.insert({ "latitude": 49.1265976, "longitude": 2.5496218, "species": "Common Juniper" }) # If you want the table to be called 'species', you can do this: trees = db.table("Trees", extracts=["species"]) # Using .insert() directly db["Trees"].insert({ "latitude": 49.1265976, "longitude": 2.5496218, "species": "Common Juniper" }, extracts={"species": "Species"}) .. _python_api_m2m: Working with many-to-many relationships ======================================= ``sqlite-utils`` includes a shortcut for creating records using many-to-many relationships in the form of the ``table.m2m(...)`` method. Here's how to create two new records and connect them via a many-to-many table in a single line of code: .. 
code-block:: python db["dogs"].insert({"id": 1, "name": "Cleo"}, pk="id").m2m( "humans", {"id": 1, "name": "Natalie"}, pk="id" ) Running this example actually creates three tables: ``dogs``, ``humans`` and a many-to-many ``dogs_humans`` table. It will insert a record into each of those tables. The ``.m2m()`` method executes against the last record that was affected by ``.insert()`` or ``.update()`` - the record identified by the ``table.last_pk`` property. To execute ``.m2m()`` against a specific record you can first select it by passing its primary key to ``.update()``: .. code-block:: python db["dogs"].update(1).m2m( "humans", {"id": 2, "name": "Simon"}, pk="id" ) The first argument to ``.m2m()`` can be either the name of a table as a string or it can be the table object itself. The second argument can be a single dictionary record or a list of dictionaries. These dictionaries will be passed to ``.upsert()`` against the specified table. Here's alternative code that creates the dog record and adds two people to it: .. code-block:: python db = Database(memory=True) dogs = db.table("dogs", pk="id") humans = db.table("humans", pk="id") dogs.insert({"id": 1, "name": "Cleo"}).m2m( humans, [ {"id": 1, "name": "Natalie"}, {"id": 2, "name": "Simon"} ] ) The method will attempt to find an existing many-to-many table by looking for a table that has foreign key relationships against both of the tables in the relationship. If it cannot find such a table, it will create a new one using the names of the two tables - ``dogs_humans`` in this example. You can customize the name of this table using the ``m2m_table=`` argument to ``.m2m()``. It it finds multiple candidate tables with foreign keys to both of the specified tables it will raise a ``sqlite_utils.db.NoObviousTable`` exception. You can avoid this error by specifying the correct table using ``m2m_table=``. The ``.m2m()`` method also takes an optional ``pk=`` argument to specify the primary key that should be used if the table is created, and an optional ``alter=True`` argument to specify that any missing columns of an existing table should be added if they are needed. .. _python_api_m2m_lookup: Using m2m and lookup tables together ------------------------------------ You can work with (or create) lookup tables as part of a call to ``.m2m()`` using the ``lookup=`` parameter. This accepts the same argument as ``table.lookup()`` does - a dictionary of values that should be used to lookup or create a row in the lookup table. This example creates a dogs table, populates it, creates a characteristics table, populates that and sets up a many-to-many relationship between the two. It chains ``.m2m()`` twice to create two associated characteristics: .. 
code-block:: python db = Database(memory=True) dogs = db.table("dogs", pk="id") dogs.insert({"id": 1, "name": "Cleo"}).m2m( "characteristics", lookup={ "name": "Playful" } ).m2m( "characteristics", lookup={ "name": "Opinionated" } ) You can inspect the database to see the results like this:: >>> db.table_names() ['dogs', 'characteristics', 'characteristics_dogs'] >>> list(db["dogs"].rows) [{'id': 1, 'name': 'Cleo'}] >>> list(db["characteristics"].rows) [{'id': 1, 'name': 'Playful'}, {'id': 2, 'name': 'Opinionated'}] >>> list(db["characteristics_dogs"].rows) [{'characteristics_id': 1, 'dogs_id': 1}, {'characteristics_id': 2, 'dogs_id': 1}] >>> print(db["characteristics_dogs"].schema) CREATE TABLE [characteristics_dogs] ( [characteristics_id] INTEGER REFERENCES [characteristics]([id]), [dogs_id] INTEGER REFERENCES [dogs]([id]), PRIMARY KEY ([characteristics_id], [dogs_id]) ) .. _python_api_analyze_column: Analyzing a column ================== The ``table.analyze_column(column)`` method is used by the :ref:`analyze-tables ` CLI command. It takes the following arguments and options: ``column`` - required The name of the column to analyze ``common_limit`` The number of most common values to return. Defaults to 10. ``value_truncate`` If set to an integer, values longer than this will be truncated to this length. Defaults to None. ``most_common`` If set to False, the ``most_common`` field of the returned ``ColumnDetails`` will be set to None. Defaults to True. ``least_common`` If set to False, the ``least_common`` field of the returned ``ColumnDetails`` will be set to None. Defaults to True. And returns a ``ColumnDetails`` named tuple with the following fields: ``table`` The name of the table ``column`` The name of the column ``total_rows`` The total number of rows in the table ``num_null`` The number of rows for which this column is null ``num_blank`` The number of rows for which this column is blank (the empty string) ``num_distinct`` The number of distinct values in this column ``most_common`` The ``N`` most common values as a list of ``(value, count)`` tuples`, or ``None`` if the table consists entirely of distinct values ``least_common`` The ``N`` least common values as a list of ``(value, count)`` tuples`, or ``None`` if the table is entirely distinct or if the number of distinct values is less than N (since they will already have been returned in ``most_common``) .. _python_api_add_column: Adding columns ============== You can add a new column to a table using the ``.add_column(col_name, col_type)`` method: .. code-block:: python db["dogs"].add_column("instagram", str) db["dogs"].add_column("weight", float) db["dogs"].add_column("dob", datetime.date) db["dogs"].add_column("image", "BLOB") db["dogs"].add_column("website") # str by default You can specify the ``col_type`` argument either using a SQLite type as a string, or by directly passing a Python type e.g. ``str`` or ``float``. The ``col_type`` is optional - if you omit it the type of ``TEXT`` will be used. SQLite types you can specify are ``"TEXT"``, ``"INTEGER"``, ``"FLOAT"`` or ``"BLOB"``. 
If you pass a Python type, it will be mapped to SQLite types as shown here:: float: "FLOAT" int: "INTEGER" bool: "INTEGER" str: "TEXT" bytes: "BLOB" datetime.datetime: "TEXT" datetime.date: "TEXT" datetime.time: "TEXT" datetime.timedelta: "TEXT" # If numpy is installed np.int8: "INTEGER" np.int16: "INTEGER" np.int32: "INTEGER" np.int64: "INTEGER" np.uint8: "INTEGER" np.uint16: "INTEGER" np.uint32: "INTEGER" np.uint64: "INTEGER" np.float16: "FLOAT" np.float32: "FLOAT" np.float64: "FLOAT" You can also add a column that is a foreign key reference to another table using the ``fk`` parameter: .. code-block:: python db["dogs"].add_column("species_id", fk="species") This will automatically detect the name of the primary key on the species table and use that (and its type) for the new column. You can explicitly specify the column you wish to reference using ``fk_col``: .. code-block:: python db["dogs"].add_column("species_id", fk="species", fk_col="ref") You can set a ``NOT NULL DEFAULT 'x'`` constraint on the new column using ``not_null_default``: .. code-block:: python db["dogs"].add_column("friends_count", int, not_null_default=0) .. _python_api_add_column_alter: Adding columns automatically on insert/update ============================================= You can insert or update data that includes new columns and have the table automatically altered to fit the new schema using the ``alter=True`` argument. This can be passed to all four of ``.insert()``, ``.upsert()``, ``.insert_all()`` and ``.upsert_all()``, or it can be passed to ``db.table(table_name, alter=True)`` to enable it by default for all method calls against that table instance. .. code-block:: python db["new_table"].insert({"name": "Gareth"}) # This will throw an exception: db["new_table"].insert({"name": "Gareth", "age": 32}) # This will succeed and add a new "age" integer column: db["new_table"].insert({"name": "Gareth", "age": 32}, alter=True) # You can see confirm the new column like so: print(db["new_table"].columns_dict) # Outputs this: # {'name': , 'age': } # This works too: new_table = db.table("new_table", alter=True) new_table.insert({"name": "Gareth", "age": 32, "shoe_size": 11}) .. _python_api_add_foreign_key: Adding foreign key constraints ============================== The SQLite ``ALTER TABLE`` statement doesn't have the ability to add foreign key references to an existing column. The ``add_foreign_key()`` method here is a convenient wrapper around :ref:`table.transform() `. It's also possible to add foreign keys by directly updating the `sqlite_master` table. The `sqlite-utils-fast-fks `__ plugin implements this pattern, using code that was included with ``sqlite-utils`` prior to version 3.35. Here's an example of this mechanism in action: .. code-block:: python db["authors"].insert_all([ {"id": 1, "name": "Sally"}, {"id": 2, "name": "Asheesh"} ], pk="id") db["books"].insert_all([ {"title": "Hedgehogs of the world", "author_id": 1}, {"title": "How to train your wolf", "author_id": 2}, ]) db["books"].add_foreign_key("author_id", "authors", "id") The ``table.add_foreign_key(column, other_table, other_column)`` method takes the name of the column, the table that is being referenced and the key column within that other table. If you omit the ``other_column`` argument the primary key from that table will be used automatically. 
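The earlier example could therefore have been written without the third argument - a sketch of that shorter form:

.. code-block:: python

    # Equivalent to add_foreign_key("author_id", "authors", "id"),
    # because "id" is the primary key of the authors table:
    db["books"].add_foreign_key("author_id", "authors")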
If you omit the ``other_table`` argument the table will be guessed based on some simple rules: - If the column is of format ``author_id``, look for tables called ``author`` or ``authors`` - If the column does not end in ``_id``, try looking for a table with the exact name of the column or that name with an added ``s`` This method first checks that the specified foreign key references tables and columns that exist and does not clash with an existing foreign key. It will raise a ``sqlite_utils.db.AlterError`` exception if these checks fail. To ignore the case where the key already exists, use ``ignore=True``: .. code-block:: python db["books"].add_foreign_key("author_id", "authors", "id", ignore=True) .. _python_api_add_foreign_keys: Adding multiple foreign key constraints at once ----------------------------------------------- You can use ``db.add_foreign_keys(...)`` to add multiple foreign keys in one go. This method takes a list of four-tuples, each one specifying a ``table``, ``column``, ``other_table`` and ``other_column``. Here's an example adding two foreign keys at once: .. code-block:: python db.add_foreign_keys([ ("dogs", "breed_id", "breeds", "id"), ("dogs", "home_town_id", "towns", "id") ]) This method runs the same checks as ``.add_foreign_keys()`` and will raise ``sqlite_utils.db.AlterError`` if those checks fail. .. _python_api_index_foreign_keys: Adding indexes for all foreign keys ----------------------------------- If you want to ensure that every foreign key column in your database has a corresponding index, you can do so like this: .. code-block:: python db.index_foreign_keys() .. _python_api_drop: Dropping a table or view ======================== You can drop a table or view using the ``.drop()`` method: .. code-block:: python db["my_table"].drop() Pass ``ignore=True`` if you want to ignore the error caused by the table or view not existing. .. code-block:: python db["my_table"].drop(ignore=True) .. _python_api_transform: Transforming a table ==================== The SQLite ``ALTER TABLE`` statement is limited. It can add and drop columns and rename tables, but it cannot change column types, change ``NOT NULL`` status or change the primary key for a table. The ``table.transform()`` method can do all of these things, by implementing a multi-step pattern `described in the SQLite documentation `__: 1. Start a transaction 2. ``CREATE TABLE tablename_new_x123`` with the required changes 3. Copy the old data into the new table using ``INSERT INTO tablename_new_x123 SELECT * FROM tablename;`` 4. ``DROP TABLE tablename;`` 5. ``ALTER TABLE tablename_new_x123 RENAME TO tablename;`` 6. Commit the transaction The ``.transform()`` method takes a number of parameters, all of which are optional. As a bonus, calling ``.transform()`` will reformat the schema for the table that is stored in SQLite to make it more readable. This works even if you call it without any arguments. To keep the original table around instead of dropping it, pass the ``keep_table=`` option and specify the name of the table you would like it to be renamed to: .. code-block:: python table.transform(types={"age": int}, keep_table="original_table") .. _python_api_transform_alter_column_types: Altering column types --------------------- To alter the type of a column, use the ``types=`` argument: .. code-block:: python # Convert the 'age' column to an integer, and 'weight' to a float table.transform(types={"age": int, "weight": float}) See :ref:`python_api_add_column` for a list of available types. .. 
_python_api_transform_rename_columns: Renaming columns ---------------- The ``rename=`` parameter can rename columns: .. code-block:: python # Rename 'age' to 'initial_age': table.transform(rename={"age": "initial_age"}) .. _python_api_transform_drop_columns: Dropping columns ---------------- To drop columns, pass them in the ``drop=`` set: .. code-block:: python # Drop the 'age' column: table.transform(drop={"age"}) .. _python_api_transform_change_primary_keys: Changing primary keys --------------------- To change the primary key for a table, use ``pk=``. This can be passed a single column for a regular primary key, or a tuple of columns to create a compound primary key. Passing ``pk=None`` will remove the primary key and convert the table into a ``rowid`` table. .. code-block:: python # Make `user_id` the new primary key table.transform(pk="user_id") .. _python_api_transform_change_not_null: Changing not null status ------------------------ You can change the ``NOT NULL`` status of columns by using ``not_null=``. You can pass this a set of columns to make those columns ``NOT NULL``: .. code-block:: python # Make the 'age' and 'weight' columns NOT NULL table.transform(not_null={"age", "weight"}) If you want to take existing ``NOT NULL`` columns and change them to allow null values, you can do so by passing a dictionary of true/false values instead: .. code-block:: python # 'age' is NOT NULL but we want to allow NULL: table.transform(not_null={"age": False}) # Make age allow NULL and switch weight to being NOT NULL: table.transform(not_null={"age": False, "weight": True}) .. _python_api_transform_alter_column_defaults: Altering column defaults ------------------------ The ``defaults=`` parameter can be used to set or change the defaults for different columns: .. code-block:: python # Set default age to 1: table.transform(defaults={"age": 1}) # Now remove the default from that column: table.transform(defaults={"age": None}) .. _python_api_transform_change_column_order: Changing column order --------------------- The ``column_order=`` parameter can be used to change the order of the columns. If you pass the names of a subset of the columns those will go first and columns you omitted will appear in their existing order after them. .. code-block:: python # Change column order table.transform(column_order=("name", "age", "id") .. _python_api_transform_add_foreign_key_constraints: Adding foreign key constraints ------------------------------ You can add one or more foreign key constraints to a table using the ``add_foreign_keys=`` parameter: .. code-block:: python db["places"].transform( add_foreign_keys=( ("country", "country", "id"), ("continent", "continent", "id") ) ) This accepts the same arguments described in :ref:`specifying foreign keys ` - so you can specify them as a full tuple of ``(column, other_table, other_column)``, or you can take a shortcut and pass just the name of the column, provided the table can be automatically derived from the column name: .. code-block:: python db["places"].transform( add_foreign_keys=(("country", "continent")) ) .. _python_api_transform_replace_foreign_key_constraints: Replacing foreign key constraints --------------------------------- The ``foreign_keys=`` parameter is similar to to ``add_foreign_keys=`` but can be be used to replace all foreign key constraints on a table, dropping any that are not explicitly mentioned: .. code-block:: python db["places"].transform( foreign_keys=( ("continent", "continent", "id"), ) ) .. 
_python_api_transform_drop_foreign_key_constraints: Dropping foreign key constraints -------------------------------- You can use ``.transform()`` to remove foreign key constraints from a table. This example drops two foreign keys - the one from ``places.country`` to ``country.id`` and the one from ``places.continent`` to ``continent.id``: .. code-block:: python db["places"].transform( drop_foreign_keys=("country", "continent") ) .. _python_api_transform_sql: Custom transformations with .transform_sql() -------------------------------------------- The ``.transform()`` method can handle most cases, but it does not automatically upgrade indexes, views or triggers associated with the table that is being transformed. If you want to do something more advanced, you can call the ``table.transform_sql(...)`` method with the same arguments that you would have passed to ``table.transform(...)``. This method will return a list of SQL statements that should be executed to implement the change. You can then make modifications to that SQL - or add additional SQL statements - before executing it yourself. .. _python_api_extract: Extracting columns into a separate table ======================================== The ``table.extract()`` method can be used to extract specified columns into a separate table. Imagine a ``Trees`` table that looks like this: === ============ ======= id TreeAddress Species === ============ ======= 1 52 Vine St Palm 2 12 Draft St Oak 3 51 Dark Ave Palm 4 1252 Left St Palm === ============ ======= The ``Species`` column contains duplicate values. This database could be improved by extracting that column out into a separate ``Species`` table and pointing to it using a foreign key column. The schema of the above table is: .. code-block:: sql CREATE TABLE [Trees] ( [id] INTEGER PRIMARY KEY, [TreeAddress] TEXT, [Species] TEXT ) Here's how to extract the ``Species`` column using ``.extract()``: .. code-block:: python db["Trees"].extract("Species") After running this code the table schema now looks like this: .. code-block:: sql CREATE TABLE "Trees" ( [id] INTEGER PRIMARY KEY, [TreeAddress] TEXT, [Species_id] INTEGER, FOREIGN KEY(Species_id) REFERENCES Species(id) ) A new ``Species`` table will have been created with the following schema: .. code-block:: sql CREATE TABLE [Species] ( [id] INTEGER PRIMARY KEY, [Species] TEXT ) The ``.extract()`` method defaults to creating a table with the same name as the column that was extracted, and adding a foreign key column called ``tablename_id``. You can specify a custom table name using ``table=``, and a custom foreign key name using ``fk_column=``. This example creates a table called ``tree_species`` and a foreign key column called ``tree_species_id``: .. code-block:: python db["Trees"].extract("Species", table="tree_species", fk_column="tree_species_id") The resulting schema looks like this: .. code-block:: sql CREATE TABLE "Trees" ( [id] INTEGER PRIMARY KEY, [TreeAddress] TEXT, [tree_species_id] INTEGER, FOREIGN KEY(tree_species_id) REFERENCES tree_species(id) ) CREATE TABLE [tree_species] ( [id] INTEGER PRIMARY KEY, [Species] TEXT ) You can also extract multiple columns into the same external table. 
Say for example you have a table like this:

=== ============ ========== =========
id  TreeAddress  CommonName LatinName
=== ============ ========== =========
1   52 Vine St   Palm       Arecaceae
2   12 Draft St  Oak        Quercus
3   51 Dark Ave  Palm       Arecaceae
4   1252 Left St Palm       Arecaceae
=== ============ ========== =========

You can pass ``["CommonName", "LatinName"]`` to ``.extract()`` to extract both of those columns:

.. code-block:: python

    db["Trees"].extract(["CommonName", "LatinName"])

This produces the following schema:

.. code-block:: sql

    CREATE TABLE "Trees" (
        [id] INTEGER PRIMARY KEY,
        [TreeAddress] TEXT,
        [CommonName_LatinName_id] INTEGER,
        FOREIGN KEY(CommonName_LatinName_id) REFERENCES CommonName_LatinName(id)
    )
    CREATE TABLE [CommonName_LatinName] (
        [id] INTEGER PRIMARY KEY,
        [CommonName] TEXT,
        [LatinName] TEXT
    )

The table name ``CommonName_LatinName`` is derived from the extract columns. You can use ``table=`` and ``fk_column=`` to specify custom names like this:

.. code-block:: python

    db["Trees"].extract(["CommonName", "LatinName"], table="Species", fk_column="species_id")

This produces the following schema:

.. code-block:: sql

    CREATE TABLE "Trees" (
        [id] INTEGER PRIMARY KEY,
        [TreeAddress] TEXT,
        [species_id] INTEGER,
        FOREIGN KEY(species_id) REFERENCES Species(id)
    )
    CREATE TABLE [Species] (
        [id] INTEGER PRIMARY KEY,
        [CommonName] TEXT,
        [LatinName] TEXT
    )

You can use the ``rename=`` argument to rename columns in the lookup table. To create a ``Species`` table with columns called ``name`` and ``latin`` you can do this:

.. code-block:: python

    db["Trees"].extract(
        ["CommonName", "LatinName"],
        table="Species",
        fk_column="species_id",
        rename={"CommonName": "name", "LatinName": "latin"}
    )

This produces a lookup table like so:

.. code-block:: sql

    CREATE TABLE [Species] (
        [id] INTEGER PRIMARY KEY,
        [name] TEXT,
        [latin] TEXT
    )

.. _python_api_hash:

Setting an ID based on the hash of the row contents
=====================================================

Sometimes you will find yourself working with a dataset that includes rows that do not have a provided obvious ID, but where you would like to assign one so that you can later upsert into that table without creating duplicate records.

In these cases, a useful technique is to create an ID that is derived from the sha1 hash of the row contents.

``sqlite-utils`` can do this for you using the ``hash_id=`` option. For example::

    db = sqlite_utils.Database("dogs.db")
    db["dogs"].upsert({"name": "Cleo", "twitter": "cleopaws"}, hash_id="id")
    print(list(db["dogs"].rows))

Outputs::

    [{'id': 'f501265970505d9825d8d9f590bfab3519fb20b1', 'name': 'Cleo', 'twitter': 'cleopaws'}]

If you are going to use that ID straight away, you can access it using ``last_pk``::

    dog_id = db["dogs"].upsert({
        "name": "Cleo",
        "twitter": "cleopaws"
    }, hash_id="id").last_pk
    # dog_id is now "f501265970505d9825d8d9f590bfab3519fb20b1"

The hash will be created using all of the column values. To create a hash using a subset of the columns, pass the ``hash_id_columns=`` parameter::

    db["dogs"].upsert(
        {"name": "Cleo", "twitter": "cleopaws", "age": 7},
        hash_id_columns=("name", "twitter")
    )

The ``hash_id=`` parameter is optional if you specify ``hash_id_columns=`` - it will default to putting the hash in a column called ``id``.

You can manually calculate these hashes using the :ref:`hash_record(record, keys=...) <reference_utils_hash_record>` utility function.
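As a quick, hedged sketch of what that function returns (reusing the record from the example above)::

    from sqlite_utils.utils import hash_record

    hash_record({"name": "Cleo", "twitter": "cleopaws"})
    # Should return the same ID as the upsert above:
    # 'f501265970505d9825d8d9f590bfab3519fb20b1'

    # keys= restricts the hash to a subset of columns, like hash_id_columns=
    hash_record(
        {"name": "Cleo", "twitter": "cleopaws", "age": 7},
        keys=("name", "twitter")
    )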
.. _python_api_create_view:

Creating views
===============

The ``.create_view()`` method on the database class can be used to create a view:

.. code-block:: python

    db.create_view("good_dogs", """
        select * from dogs where is_good_dog = 1
    """)

This will raise a ``sqlite_utils.utils.OperationalError`` if a view with that name already exists.

You can pass ``ignore=True`` to silently ignore an existing view and do nothing, or ``replace=True`` to replace an existing view with a new definition if your select statement differs from the current view:

.. code-block:: python

    db.create_view("good_dogs", """
        select * from dogs where is_good_dog = 1
    """, replace=True)

Storing JSON
=============

SQLite has `excellent JSON support `_, and ``sqlite-utils`` can help you take advantage of this: if you attempt to insert a value that can be represented as a JSON list or dictionary, ``sqlite-utils`` will create a TEXT column and store your data as serialized JSON. This means you can quickly store even complex data structures in SQLite and query them using JSON features.

For example:

.. code-block:: python

    db["niche_museums"].insert({
        "name": "The Bigfoot Discovery Museum",
        "url": "http://bigfootdiscoveryproject.com/",
        "hours": {
            "Monday": [11, 18],
            "Wednesday": [11, 18],
            "Thursday": [11, 18],
            "Friday": [11, 18],
            "Saturday": [11, 18],
            "Sunday": [11, 18]
        },
        "address": {
            "streetAddress": "5497 Highway 9",
            "addressLocality": "Felton, CA",
            "postalCode": "95018"
        }
    })

    db.execute("""
        select json_extract(address, '$.addressLocality')
        from niche_museums
    """).fetchall()
    # Returns [('Felton, CA',)]

.. _python_api_conversions:

Converting column values using SQL functions
=============================================

Sometimes it can be useful to run values through a SQL function prior to inserting them. A simple example might be converting a value to upper case while it is being inserted.

The ``conversions={...}`` parameter can be used to specify custom SQL to be used as part of an ``INSERT`` or ``UPDATE`` SQL statement.

You can specify an upper case conversion for a specific column like so:

.. code-block:: python

    db["example"].insert({
        "name": "The Bigfoot Discovery Museum"
    }, conversions={"name": "upper(?)"})

    # list(db["example"].rows) now returns:
    # [{'name': 'THE BIGFOOT DISCOVERY MUSEUM'}]

The dictionary key is the column name to be converted. The value is the SQL fragment to use, with a ``?`` placeholder for the original value.

A more useful example: if you are working with `SpatiaLite `__ you may find yourself wanting to create geometry values from a WKT value. Code to do that could look like this:

.. code-block:: python

    import sqlite3
    import sqlite_utils
    from shapely.geometry import shape
    import httpx

    db = sqlite_utils.Database("places.db")
    # Initialize SpatiaLite
    db.init_spatialite()

    # Use sqlite-utils to create a places table
    places = db["places"].create({"id": int, "name": str})

    # Add a SpatiaLite 'geometry' column
    places.add_geometry_column("geometry", "MULTIPOLYGON")

    # Fetch some GeoJSON from Who's On First:
    geojson = httpx.get(
        "https://raw.githubusercontent.com/whosonfirst-data/"
        "whosonfirst-data-admin-gb/master/data/404/227/475/404227475.geojson"
    ).json()

    # Convert to "Well Known Text" format using shapely
    wkt = shape(geojson["geometry"]).wkt

    # Insert the record, converting the WKT to a SpatiaLite geometry:
    db["places"].insert(
        {"name": "Wales", "geometry": wkt},
        conversions={"geometry": "GeomFromText(?, 4326)"},
    )

This example uses geographical data from `Who's On First `__ and depends on the `Shapely `__ and `HTTPX `__ Python libraries.

..
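The same ``conversions=`` parameter can also be passed to ``insert_all()``. As a small, hedged sketch (assuming a hypothetical ``events`` table and date strings in a format that SQLite's ``datetime()`` function understands), values can be normalized as they are written:

.. code-block:: python

    db["events"].insert_all(
        [
            {"id": 1, "name": "Launch", "created": "2023-10-01 09:00"},
            {"id": 2, "name": "Retro", "created": "2023-10-08 15:30"},
        ],
        pk="id",
        conversions={"created": "datetime(?)"},
    )
    print([row["created"] for row in db["events"].rows])
    # ['2023-10-01 09:00:00', '2023-10-08 15:30:00']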
_python_api_sqlite_version: Checking the SQLite version =========================== The ``db.sqlite_version`` property returns a tuple of integers representing the version of SQLite used for that database object:: >>> db.sqlite_version (3, 36, 0) .. _python_api_itedump: Dumping the database to SQL =========================== The ``db.iterdump()`` method returns a sequence of SQL strings representing a complete dump of the database. Use it like this: .. code-block:: python full_sql = "".join(db.iterdump()) This uses the `sqlite3.Connection.iterdump() `__ method. If you are using ``pysqlite3`` or ``sqlean.py`` the underlying method may be missing. If you install the `sqlite-dump `__ package then the ``db.iterdump()`` method will use that implementation instead: .. code-block:: bash pip install sqlite-dump .. _python_api_introspection: Introspecting tables and views ============================== If you have loaded an existing table or view, you can use introspection to find out more about it:: >>> db["PlantType"]
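The individual properties described below can also be combined into a quick overview of a whole database. Here is a minimal, hedged sketch (assuming ``db`` is a ``Database`` object that already contains some tables) that prints a one-line summary for each table:

.. code-block:: python

    for table in db.tables:
        print(table.name, table.count, list(table.columns_dict))
    # e.g. PlantType 3 ['id', 'value']
    #      Street_Tree_List 189144 ['TreeID', 'qLegalStatus', ...]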
.. _python_api_introspection_exists: .exists() --------- The ``.exists()`` method can be used to find out if a table exists or not:: >>> db["PlantType"].exists() True >>> db["PlantType2"].exists() False .. _python_api_introspection_count: .count ------ The ``.count`` property shows the current number of rows (``select count(*) from table``):: >>> db["PlantType"].count 3 >>> db["Street_Tree_List"].count 189144 This property will take advantage of :ref:`python_api_cached_table_counts` if the ``use_counts_table`` property is set on the database. You can avoid that optimization entirely by calling ``table.count_where()`` instead of accessing the property. .. _python_api_introspection_columns: .columns -------- The ``.columns`` property shows the columns in the table or view. It returns a list of ``Column(cid, name, type, notnull, default_value, is_pk)`` named tuples. :: >>> db["PlantType"].columns [Column(cid=0, name='id', type='INTEGER', notnull=0, default_value=None, is_pk=1), Column(cid=1, name='value', type='TEXT', notnull=0, default_value=None, is_pk=0)] .. _python_api_introspection_columns_dict: .columns_dict ------------- The ``.columns_dict`` property returns a dictionary version of the columns with just the names and Python types:: >>> db["PlantType"].columns_dict {'id': , 'value': } .. _python_api_introspection_default_values: .default_values --------------- The ``.default_values`` property returns a dictionary of default values for each column that has a default:: >>> db["table_with_defaults"].default_values {'score': 5} .. _python_api_introspection_pks: .pks ---- The ``.pks`` property returns a list of strings naming the primary key columns for the table:: >>> db["PlantType"].pks ['id'] If a table has no primary keys but is a `rowid table `__, this property will return ``['rowid']``. .. _python_api_introspection_use_rowid: .use_rowid ---------- Almost all SQLite tables have a ``rowid`` column, but a table with no explicitly defined primary keys must use that ``rowid`` as the primary key for identifying individual rows. The ``.use_rowid`` property checks to see if a table needs to use the ``rowid`` in this way - it returns ``True`` if the table has no explicitly defined primary keys and ``False`` otherwise. >>> db["PlantType"].use_rowid False .. _python_api_introspection_foreign_keys: .foreign_keys ------------- The ``.foreign_keys`` property returns any foreign key relationships for the table, as a list of ``ForeignKey(table, column, other_table, other_column)`` named tuples. It is not available on views. :: >>> db["Street_Tree_List"].foreign_keys [ForeignKey(table='Street_Tree_List', column='qLegalStatus', other_table='qLegalStatus', other_column='id'), ForeignKey(table='Street_Tree_List', column='qCareAssistant', other_table='qCareAssistant', other_column='id'), ForeignKey(table='Street_Tree_List', column='qSiteInfo', other_table='qSiteInfo', other_column='id'), ForeignKey(table='Street_Tree_List', column='qSpecies', other_table='qSpecies', other_column='id'), ForeignKey(table='Street_Tree_List', column='qCaretaker', other_table='qCaretaker', other_column='id'), ForeignKey(table='Street_Tree_List', column='PlantType', other_table='PlantType', other_column='id')] .. 
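One way to put this property to work is to describe every relationship in a database. A minimal, hedged sketch (assuming ``db`` is an existing ``Database`` object):

.. code-block:: python

    for table in db.tables:
        for fk in table.foreign_keys:
            print(f"{fk.table}.{fk.column} -> {fk.other_table}.{fk.other_column}")
    # e.g. Street_Tree_List.PlantType -> PlantType.id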
_python_api_introspection_schema: .schema ------- The ``.schema`` property outputs the table's schema as a SQL string:: >>> print(db["Street_Tree_List"].schema) CREATE TABLE "Street_Tree_List" ( "TreeID" INTEGER, "qLegalStatus" INTEGER, "qSpecies" INTEGER, "qAddress" TEXT, "SiteOrder" INTEGER, "qSiteInfo" INTEGER, "PlantType" INTEGER, "qCaretaker" INTEGER, "qCareAssistant" INTEGER, "PlantDate" TEXT, "DBH" INTEGER, "PlotSize" TEXT, "PermitNotes" TEXT, "XCoord" REAL, "YCoord" REAL, "Latitude" REAL, "Longitude" REAL, "Location" TEXT , FOREIGN KEY ("PlantType") REFERENCES [PlantType](id), FOREIGN KEY ("qCaretaker") REFERENCES [qCaretaker](id), FOREIGN KEY ("qSpecies") REFERENCES [qSpecies](id), FOREIGN KEY ("qSiteInfo") REFERENCES [qSiteInfo](id), FOREIGN KEY ("qCareAssistant") REFERENCES [qCareAssistant](id), FOREIGN KEY ("qLegalStatus") REFERENCES [qLegalStatus](id)) .. _python_api_introspection_strict: .strict ------- The ``.strict`` property identifies if the table is a `SQLite STRICT table `__. :: >>> db["ny_times_us_counties"].strict False .. _python_api_introspection_indexes: .indexes -------- The ``.indexes`` property returns all indexes created for a table, as a list of ``Index(seq, name, unique, origin, partial, columns)`` named tuples. It is not available on views. :: >>> db["Street_Tree_List"].indexes [Index(seq=0, name='"Street_Tree_List_qLegalStatus"', unique=0, origin='c', partial=0, columns=['qLegalStatus']), Index(seq=1, name='"Street_Tree_List_qCareAssistant"', unique=0, origin='c', partial=0, columns=['qCareAssistant']), Index(seq=2, name='"Street_Tree_List_qSiteInfo"', unique=0, origin='c', partial=0, columns=['qSiteInfo']), Index(seq=3, name='"Street_Tree_List_qSpecies"', unique=0, origin='c', partial=0, columns=['qSpecies']), Index(seq=4, name='"Street_Tree_List_qCaretaker"', unique=0, origin='c', partial=0, columns=['qCaretaker']), Index(seq=5, name='"Street_Tree_List_PlantType"', unique=0, origin='c', partial=0, columns=['PlantType'])] .. _python_api_introspection_xindexes: .xindexes --------- The ``.xindexes`` property returns more detailed information about the indexes on the table, using the SQLite `PRAGMA index_xinfo() `__ mechanism. It returns a list of ``XIndex(name, columns)`` named tuples, where ``columns`` is a list of ``XIndexColumn(seqno, cid, name, desc, coll, key)`` named tuples. :: >>> db["ny_times_us_counties"].xindexes [ XIndex( name='idx_ny_times_us_counties_date', columns=[ XIndexColumn(seqno=0, cid=0, name='date', desc=1, coll='BINARY', key=1), XIndexColumn(seqno=1, cid=-1, name=None, desc=0, coll='BINARY', key=0) ] ), XIndex( name='idx_ny_times_us_counties_fips', columns=[ XIndexColumn(seqno=0, cid=3, name='fips', desc=0, coll='BINARY', key=1), XIndexColumn(seqno=1, cid=-1, name=None, desc=0, coll='BINARY', key=0) ] ) ] .. _python_api_introspection_triggers: .triggers --------- The ``.triggers`` property lists database triggers. It can be used on both database and table objects. It returns a list of ``Trigger(name, table, sql)`` named tuples. :: >>> db["authors"].triggers [Trigger(name='authors_ai', table='authors', sql='CREATE TRIGGER [authors_ai] AFTER INSERT...'), Trigger(name='authors_ad', table='authors', sql="CREATE TRIGGER [authors_ad] AFTER DELETE..."), Trigger(name='authors_au', table='authors', sql="CREATE TRIGGER [authors_au] AFTER UPDATE")] >>> db.triggers ... similar output to db["authors"].triggers .. 
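These introspection properties are also useful for making schema changes conditional. As a hedged sketch (assuming a ``dogs`` table with a ``name`` column), ``.indexes`` can be checked before creating a new index with :ref:`python_api_create_index`:

.. code-block:: python

    table = db["dogs"]
    # Collect the column combinations that are already covered by an index
    already_indexed = {tuple(index.columns) for index in table.indexes}
    if ("name",) not in already_indexed:
        table.create_index(["name"])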
_python_api_introspection_triggers_dict: .triggers_dict -------------- The ``.triggers_dict`` property returns the triggers for that table as a dictionary mapping their names to their SQL definitions. :: >>> db["authors"].triggers_dict {'authors_ai': 'CREATE TRIGGER [authors_ai] AFTER INSERT...', 'authors_ad': 'CREATE TRIGGER [authors_ad] AFTER DELETE...', 'authors_au': 'CREATE TRIGGER [authors_au] AFTER UPDATE'} The same property exists on the database, and will return all triggers across all tables: :: >>> db.triggers_dict {'authors_ai': 'CREATE TRIGGER [authors_ai] AFTER INSERT...', 'authors_ad': 'CREATE TRIGGER [authors_ad] AFTER DELETE...', 'authors_au': 'CREATE TRIGGER [authors_au] AFTER UPDATE'} .. _python_api_introspection_detect_fts: .detect_fts() ------------- The ``detect_fts()`` method returns the associated SQLite FTS table name, if one exists for this table. If the table has not been configured for full-text search it returns ``None``. :: >>> db["authors"].detect_fts() "authors_fts" .. _python_api_introspection_virtual_table_using: .virtual_table_using -------------------- The ``.virtual_table_using`` property reveals if a table is a virtual table. It returns ``None`` for regular tables and the upper case version of the type of virtual table otherwise. For example:: >>> db["authors"].enable_fts(["name"]) >>> db["authors_fts"].virtual_table_using "FTS5" .. _python_api_introspection_has_counts_triggers: .has_counts_triggers -------------------- The ``.has_counts_triggers`` property shows if a table has been configured with triggers for updating a ``_counts`` table, as described in :ref:`python_api_cached_table_counts`. :: >>> db["authors"].has_counts_triggers False >>> db["authors"].enable_counts() >>> db["authors"].has_counts_triggers True .. _python_api_fts: Full-text search ================ SQLite includes bundled extensions that implement `powerful full-text search `__. .. _python_api_fts_enable: Enabling full-text search for a table ------------------------------------- You can enable full-text search on a table using ``.enable_fts(columns)``: .. code-block:: python db["dogs"].enable_fts(["name", "twitter"]) You can then run searches using the ``.search()`` method: .. code-block:: python rows = list(db["dogs"].search("cleo")) This method returns a generator that can be looped over to get dictionaries for each row, similar to :ref:`python_api_rows`. If you insert additional records into the table you will need to refresh the search index using ``populate_fts()``: .. code-block:: python db["dogs"].insert({ "id": 2, "name": "Marnie", "twitter": "MarnieTheDog", "age": 16, "is_good_dog": True, }, pk="id") db["dogs"].populate_fts(["name", "twitter"]) A better solution is to use database triggers. You can set up database triggers to automatically update the full-text index using ``create_triggers=True``: .. code-block:: python db["dogs"].enable_fts(["name", "twitter"], create_triggers=True) ``.enable_fts()`` defaults to using `FTS5 `__. If you wish to use `FTS4 `__ instead, use the following: .. code-block:: python db["dogs"].enable_fts(["name", "twitter"], fts_version="FTS4") You can customize the tokenizer configured for the table using the ``tokenize=`` parameter. For example, to enable Porter stemming, where English words like "running" will match stemmed alternatives such as "run", use ``tokenize="porter"``: .. 
code-block:: python db["articles"].enable_fts(["headline", "body"], tokenize="porter") The SQLite documentation has more on `FTS5 tokenizers `__ and `FTS4 tokenizers `__. ``porter`` is a valid option for both. If you attempt to configure a FTS table where one already exists, a ``sqlite3.OperationalError`` exception will be raised. You can replace the existing table with a new configuration using ``replace=True``: .. code-block:: python db["articles"].enable_fts(["headline"], tokenize="porter", replace=True) This will have no effect if the FTS table already exists, otherwise it will drop and recreate the table with the new settings. This takes into consideration the columns, the tokenizer, the FTS version used and whether or not the table has triggers. To remove the FTS tables and triggers you created, use the ``disable_fts()`` table method: .. code-block:: python db["dogs"].disable_fts() .. _python_api_quote_fts: Quoting characters for use in search ------------------------------------ SQLite supports `advanced search query syntax `__. In some situations you may wish to disable this, since characters such as ``.`` may have special meaning that causes errors when searching for strings provided by your users. The ``db.quote_fts(query)`` method returns the query with SQLite full-text search quoting applied such that the query should be safe to use in a search:: db.quote_fts("Search term.") # Returns: '"Search" "term."' .. _python_api_fts_search: Searching with table.search() ----------------------------- The ``table.search(q)`` method returns a generator over Python dictionaries representing rows that match the search phrase ``q``, ordered by relevance with the most relevant results first. .. code-block:: python for article in db["articles"].search("jquery"): print(article) The ``.search()`` method also accepts the following optional parameters: ``order_by`` string The column to sort by. Defaults to relevance score. Can optionally include a ``desc``, e.g. ``rowid desc``. ``columns`` array of strings Columns to return. Defaults to all columns. ``limit`` integer Number of results to return. Defaults to all results. ``offset`` integer Offset to use along side the limit parameter. ``where`` string Extra SQL fragment for the WHERE clause ``where_args`` dictionary Arguments to use for ``:param`` placeholders in the extra WHERE clause ``quote`` bool Apply :ref:`FTS quoting rules ` to the search query, disabling advanced query syntax in a way that avoids surprising errors. To return just the title and published columns for three matches for ``"dog"`` where the ``id`` is greater than 10 ordered by ``published`` with the most recent first, use the following: .. code-block:: python for article in db["articles"].search( "dog", order_by="published desc", limit=3, where="id > :min_id", where_args={"min_id": 10}, columns=["title", "published"] ): print(article) .. _python_api_fts_search_sql: Building SQL queries with table.search_sql() -------------------------------------------- You can generate the SQL query that would be used for a search using the ``table.search_sql()`` method. It takes the same arguments as ``table.search()``, with the exception of the search query and the ``where_args`` parameter, since those should be provided when the returned SQL is executed. .. code-block:: python print(db["articles"].search_sql(columns=["title", "author"])) Outputs: .. 
code-block:: sql with original as ( select rowid, [title], [author] from [articles] ) select [original].[title], [original].[author] from [original] join [articles_fts] on [original].rowid = [articles_fts].rowid where [articles_fts] match :query order by [articles_fts].rank This method detects if a SQLite table uses FTS4 or FTS5, and outputs the correct SQL for ordering by relevance depending on the search type. The FTS4 output looks something like this: .. code-block:: sql with original as ( select rowid, [title], [author] from [articles] ) select [original].[title], [original].[author] from [original] join [articles_fts] on [original].rowid = [articles_fts].rowid where [articles_fts] match :query order by rank_bm25(matchinfo([articles_fts], 'pcnalx')) This uses the ``rank_bm25()`` custom SQL function from `sqlite-fts4 `__. You can register that custom function against a ``Database`` connection using this method: .. code-block:: python db.register_fts4_bm25() .. _python_api_fts_rebuild: Rebuilding a full-text search table =================================== You can rebuild a table using the ``table.rebuild_fts()`` method. This is useful for if the table configuration changes or the indexed data has become corrupted in some way. .. code-block:: python db["dogs"].rebuild_fts() This method can be called on a table that has been configured for full-text search - ``dogs`` in this instance - or directly on a ``_fts`` table: .. code-block:: python db["dogs_fts"].rebuild_fts() This runs the following SQL:: INSERT INTO dogs_fts (dogs_fts) VALUES ("rebuild"); .. _python_api_fts_optimize: Optimizing a full-text search table =================================== Once you have populated a FTS table you can optimize it to dramatically reduce its size like so: .. code-block:: python db["dogs"].optimize() This runs the following SQL:: INSERT INTO dogs_fts (dogs_fts) VALUES ("optimize"); .. _python_api_cached_table_counts: Cached table counts using triggers ================================== The ``select count(*)`` query in SQLite requires a full scan of the primary key index, and can take an increasingly long time as the table grows larger. The ``table.enable_counts()`` method can be used to configure triggers to continuously update a record in a ``_counts`` table. This value can then be used to quickly retrieve the count of rows in the associated table. .. code-block:: python db["dogs"].enable_counts() This will create the ``_counts`` table if it does not already exist, with the following schema: .. code-block:: sql CREATE TABLE [_counts] ( [table] TEXT PRIMARY KEY, [count] INTEGER DEFAULT 0 ) You can enable cached counts for every table in a database (except for virtual tables and the ``_counts`` table itself) using the database ``enable_counts()`` method: .. code-block:: python db.enable_counts() Once enabled, table counts will be stored in the ``_counts`` table. The count records will be automatically kept up-to-date by the triggers when rows are added or deleted to the table. To access these counts you can query the ``_counts`` table directly or you can use the ``db.cached_counts()`` method. 
This method returns a dictionary mapping tables to their counts:: >>> db.cached_counts() {'global-power-plants': 33643, 'global-power-plants_fts_data': 136, 'global-power-plants_fts_idx': 199, 'global-power-plants_fts_docsize': 33643, 'global-power-plants_fts_config': 1} You can pass a list of table names to this method to retrieve just those counts:: >>> db.cached_counts(["global-power-plants"]) {'global-power-plants': 33643} The ``table.count`` property executes a ``select count(*)`` query by default, unless the ``db.use_counts_table`` property is set to ``True``. You can set ``use_counts_table`` to ``True`` when you instantiate the database object: .. code-block:: python db = Database("global-power-plants.db", use_counts_table=True) If the property is ``True`` any calls to the ``table.count`` property will first attempt to find the cached count in the ``_counts`` table, and fall back on a ``count(*)`` query if the value is not available or the table is missing. Calling the ``.enable_counts()`` method on a database or table object will set ``use_counts_table`` to ``True`` for the lifetime of that database object. If the ``_counts`` table ever becomes out-of-sync with the actual table counts you can repair it using the ``.reset_counts()`` method: .. code-block:: python db.reset_counts() .. _python_api_create_index: Creating indexes ================ You can create an index on a table using the ``.create_index(columns)`` method. The method takes a list of columns: .. code-block:: python db["dogs"].create_index(["is_good_dog"]) By default the index will be named ``idx_{table-name}_{columns}``. If you pass ``find_unique_name=True`` and the automatically derived name already exists, an available name will be found by incrementing a suffix number, for example ``idx_items_title_2``. You can customize the name of the created index by passing the ``index_name`` parameter: .. code-block:: python db["dogs"].create_index( ["is_good_dog", "age"], index_name="good_dogs_by_age" ) To create an index in descending order for a column, wrap the column name in ``db.DescIndex()`` like this: .. code-block:: python from sqlite_utils.db import DescIndex db["dogs"].create_index( ["is_good_dog", DescIndex("age")], index_name="good_dogs_by_age" ) You can create a unique index by passing ``unique=True``: .. code-block:: python db["dogs"].create_index(["name"], unique=True) Use ``if_not_exists=True`` to do nothing if an index with that name already exists. Pass ``analyze=True`` to run ``ANALYZE`` against the new index after creating it. .. _python_api_analyze: Optimizing index usage with ANALYZE =================================== The `SQLite ANALYZE command `__ builds a table of statistics which the query planner can use to make better decisions about which indexes to use for a given query. You should run ``ANALYZE`` if your database is large and you do not think your indexes are being efficiently used. To run ``ANALYZE`` against every index in a database, use this: .. code-block:: python db.analyze() To run it just against a specific named index, pass the name of the index to that method: .. code-block:: python db.analyze("idx_countries_country_name") To run against all indexes attached to a specific table, you can either pass the table name to ``db.analyze(...)`` or you can call the method directly on the table, like this: .. code-block:: python db["dogs"].analyze() .. _python_api_vacuum: Vacuum ====== You can optimize your database by running VACUUM against it like so: .. 
code-block:: python Database("my_database.db").vacuum() .. _python_api_wal: WAL mode ======== You can enable `Write-Ahead Logging `__ for a database with ``.enable_wal()``: .. code-block:: python Database("my_database.db").enable_wal() You can disable WAL mode using ``.disable_wal()``: .. code-block:: python Database("my_database.db").disable_wal() You can check the current journal mode for a database using the ``journal_mode`` property: .. code-block:: python journal_mode = Database("my_database.db").journal_mode This will usually be ``wal`` or ``delete`` (meaning WAL is disabled), but can have other values - see the `PRAGMA journal_mode `__ documentation. .. _python_api_suggest_column_types: Suggesting column types ======================= When you create a new table for a list of inserted or upserted Python dictionaries, those methods detect the correct types for the database columns based on the data you pass in. In some situations you may need to intervene in this process, to customize the columns that are being created in some way - see :ref:`python_api_explicit_create`. That table ``.create()`` method takes a dictionary mapping column names to the Python type they should store: .. code-block:: python db["cats"].create({ "id": int, "name": str, "weight": float, }) You can use the ``suggest_column_types()`` helper function to derive a dictionary of column names and types from a list of records, suitable to be passed to ``table.create()``. For example: .. code-block:: python from sqlite_utils import Database, suggest_column_types cats = [{ "id": 1, "name": "Snowflake" }, { "id": 2, "name": "Crabtree", "age": 4 }] types = suggest_column_types(cats) # types now looks like this: # {"id": , # "name": , # "age": } # Manually add an extra field: types["thumbnail"] = bytes # types now looks like this: # {"id": , # "name": , # "age": , # "thumbnail": } # Create the table db = Database("cats.db") db["cats"].create(types, pk="id") # Insert the records db["cats"].insert_all(cats) # list(db["cats"].rows) now returns: # [{"id": 1, "name": "Snowflake", "age": None, "thumbnail": None} # {"id": 2, "name": "Crabtree", "age": 4, "thumbnail": None}] # The table schema looks like this: # print(db["cats"].schema) # CREATE TABLE [cats] ( # [id] INTEGER PRIMARY KEY, # [name] TEXT, # [age] INTEGER, # [thumbnail] BLOB # ) .. _python_api_register_function: Registering custom SQL functions ================================ SQLite supports registering custom SQL functions written in Python. The ``db.register_function()`` method lets you register these functions, and keeps track of functions that have already been registered. If you use it as a method it will automatically detect the name and number of arguments needed by the function: .. code-block:: python from sqlite_utils import Database db = Database(memory=True) def reverse_string(s): return "".join(reversed(list(s))) db.register_function(reverse_string) print(db.execute('select reverse_string("hello")').fetchone()[0]) # This prints "olleh" You can also use the method as a function decorator like so: .. code-block:: python @db.register_function def reverse_string(s): return "".join(reversed(list(s))) print(db.execute('select reverse_string("hello")').fetchone()[0]) By default, the name of the Python function will be used as the name of the SQL function. You can customize this with the ``name=`` keyword argument: .. 
code-block:: python @db.register_function(name="rev") def reverse_string(s): return "".join(reversed(list(s))) print(db.execute('select rev("hello")').fetchone()[0]) Python 3.8 added the ability to register `deterministic SQLite functions `__, allowing you to indicate that a function will return the exact same result for any given inputs and hence allowing SQLite to apply some performance optimizations. You can mark a function as deterministic using ``deterministic=True``, like this: .. code-block:: python @db.register_function(deterministic=True) def reverse_string(s): return "".join(reversed(list(s))) If you run this on a version of Python prior to 3.8 your code will still work, but the ``deterministic=True`` parameter will be ignored. By default registering a function with the same name and number of arguments will have no effect - the ``Database`` instance keeps track of functions that have already been registered and skips registering them if ``@db.register_function`` is called a second time. If you want to deliberately replace the registered function with a new implementation, use the ``replace=True`` argument: .. code-block:: python @db.register_function(deterministic=True, replace=True) def reverse_string(s): return s[::-1] Exceptions that occur inside a user-defined function default to returning the following error:: Unexpected error: user-defined function raised exception You can cause ``sqlite3`` to return more useful errors, including the traceback from the custom function, by executing the following before your custom functions are executed: .. code-block:: python from sqlite_utils.utils import sqlite3 sqlite3.enable_callback_tracebacks(True) .. _python_api_quote: Quoting strings for use in SQL ============================== In almost all cases you should pass values to your SQL queries using the optional ``parameters`` argument to ``db.query()``, as described in :ref:`python_api_parameters`. If that option isn't relevant to your use-case you can to quote a string for use with SQLite using the ``db.quote()`` method, like so: :: >>> db = Database(memory=True) >>> db.quote("hello") "'hello'" >>> db.quote("hello'this'has'quotes") "'hello''this''has''quotes'" .. _python_api_rows_from_file: Reading rows from a file ======================== The ``sqlite_utils.utils.rows_from_file()`` helper function can read rows (a sequence of dictionaries) from CSV, TSV, JSON or newline-delimited JSON files. .. autofunction:: sqlite_utils.utils.rows_from_file :noindex: .. _python_api_maximize_csv_field_size_limit: Setting the maximum CSV field size limit ======================================== Sometimes when working with CSV files that include extremely long fields you may see an error that looks like this:: _csv.Error: field larger than field limit (131072) The Python standard library ``csv`` module enforces a field size limit. You can increase that limit using the ``csv.field_size_limit(new_limit)`` method (`documented here `__) but if you don't want to pick a new level you may instead want to increase it to the maximum possible. The maximum possible value for this is not documented, and varies between systems. Calling ``sqlite_utils.utils.maximize_csv_field_size_limit()`` will set the value to the highest possible for the current system: .. code-block:: python from sqlite_utils.utils import maximize_csv_field_size_limit maximize_csv_field_size_limit() If you need to reset to the original value after calling this function you can do so like this: .. 
code-block:: python from sqlite_utils.utils import ORIGINAL_CSV_FIELD_SIZE_LIMIT import csv csv.field_size_limit(ORIGINAL_CSV_FIELD_SIZE_LIMIT) .. _python_api_typetracker: Detecting column types using TypeTracker ======================================== Sometimes you may find yourself working with data that lacks type information - data from a CSV file for example. The ``TypeTracker`` class can be used to try to automatically identify the most likely types for data that is initially represented as strings. Consider this example: .. code-block:: python import csv, io csv_file = io.StringIO("id,name\n1,Cleo\n2,Cardi") rows = list(csv.DictReader(csv_file)) # rows is now this: # [{'id': '1', 'name': 'Cleo'}, {'id': '2', 'name': 'Cardi'}] If we insert this data directly into a table we will get a schema that is entirely ``TEXT`` columns: .. code-block:: python from sqlite_utils import Database db = Database(memory=True) db["creatures"].insert_all(rows) print(db.schema) # Outputs: # CREATE TABLE [creatures] ( # [id] TEXT, # [name] TEXT # ); We can detect the best column types using a ``TypeTracker`` instance: .. code-block:: python from sqlite_utils.utils import TypeTracker tracker = TypeTracker() db["creatures2"].insert_all(tracker.wrap(rows)) print(tracker.types) # Outputs {'id': 'integer', 'name': 'text'} We can then apply those types to our new table using the :ref:`table.transform() ` method: .. code-block:: python db["creatures2"].transform(types=tracker.types) print(db["creatures2"].schema) # Outputs: # CREATE TABLE [creatures2] ( # [id] INTEGER, # [name] TEXT # ); .. _python_api_gis: SpatiaLite helpers ================== `SpatiaLite `__ is a geographic extension to SQLite (similar to PostgreSQL + PostGIS). Using requires finding, loading and initializing the extension, adding geometry columns to existing tables and optionally creating spatial indexes. The utilities here help streamline that setup. .. _python_api_gis_init_spatialite: Initialize SpatiaLite --------------------- .. automethod:: sqlite_utils.db.Database.init_spatialite :noindex: .. _python_api_gis_find_spatialite: Finding SpatiaLite ------------------ .. autofunction:: sqlite_utils.utils.find_spatialite .. _python_api_gis_add_geometry_column: Adding geometry columns ----------------------- .. automethod:: sqlite_utils.db.Table.add_geometry_column :noindex: .. _python_api_gis_create_spatial_index: Creating a spatial index ------------------------ .. automethod:: sqlite_utils.db.Table.create_spatial_index :noindex: sqlite-utils-3.35.2/docs/reference.rst000066400000000000000000000044041452131415600176670ustar00rootroot00000000000000.. _reference: =============== API reference =============== .. contents:: :local: :class: this-will-duplicate-information-and-it-is-still-useful-here .. _reference_db_database: sqlite_utils.db.Database ======================== .. autoclass:: sqlite_utils.db.Database :members: :undoc-members: :special-members: __getitem__ :exclude-members: use_counts_table, execute_returning_dicts, resolve_foreign_keys .. _reference_db_queryable: sqlite_utils.db.Queryable ========================= :ref:`Table ` and :ref:`View ` are both subclasses of ``Queryable``, providing access to the following methods: .. autoclass:: sqlite_utils.db.Queryable :members: :undoc-members: :exclude-members: execute_count .. _reference_db_table: sqlite_utils.db.Table ===================== .. 
autoclass:: sqlite_utils.db.Table :members: :undoc-members: :show-inheritance: :exclude-members: guess_foreign_column, value_or_default, build_insert_queries_and_params, insert_chunk, add_missing_columns .. _reference_db_view: sqlite_utils.db.View ==================== .. autoclass:: sqlite_utils.db.View :members: :undoc-members: :show-inheritance: .. _reference_db_other: Other ===== .. _reference_db_other_column: sqlite_utils.db.Column ---------------------- .. autoclass:: sqlite_utils.db.Column .. _reference_db_other_column_details: sqlite_utils.db.ColumnDetails ----------------------------- .. autoclass:: sqlite_utils.db.ColumnDetails sqlite_utils.utils ================== .. _reference_utils_hash_record: sqlite_utils.utils.hash_record ------------------------------ .. autofunction:: sqlite_utils.utils.hash_record .. _reference_utils_rows_from_file: sqlite_utils.utils.rows_from_file --------------------------------- .. autofunction:: sqlite_utils.utils.rows_from_file .. _reference_utils_typetracker: sqlite_utils.utils.TypeTracker ------------------------------ .. autoclass:: sqlite_utils.utils.TypeTracker :members: wrap, types .. _reference_utils_chunks: sqlite_utils.utils.chunks ------------------------- .. autofunction:: sqlite_utils.utils.chunks .. _reference_utils_flatten: sqlite_utils.utils.flatten -------------------------- .. autofunction:: sqlite_utils.utils.flatten sqlite-utils-3.35.2/docs/tutorial.ipynb000066400000000000000000000755531452131415600201220ustar00rootroot00000000000000{ "cells": [ { "cell_type": "markdown", "id": "27ae18ec", "metadata": {}, "source": [ "# The sqlite-utils tutorial\n", "\n", "[sqlite-utils](https://sqlite-utils.datasette.io/en/stable/python-api.html) is a Python library (and [command-line tool](https://sqlite-utils.datasette.io/en/stable/cli.html) for quickly creating and manipulating SQLite database files.\n", "\n", "This tutorial will show you how to use the Python library to manipulate data.\n", "\n", "## Installation\n", "\n", "To install the library, run:\n", "\n", " pip install sqlite-utils\n", "\n", "You can run this in a Jupyter notebook cell by executing:\n", "\n", " %pip install sqlite-utils\n", " \n", "Or use `pip install -U sqlite-utils` to ensure you have upgraded to the most recent version." 
] }, { "cell_type": "code", "execution_count": 1, "id": "bddee0d2", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Requirement already satisfied: sqlite_utils in /usr/local/Cellar/jupyterlab/3.0.16_1/libexec/lib/python3.9/site-packages (3.14)\n", "Requirement already satisfied: click-default-group in /usr/local/lib/python3.9/site-packages (from sqlite_utils) (1.2.2)\n", "Requirement already satisfied: sqlite-fts4 in /usr/local/lib/python3.9/site-packages (from sqlite_utils) (1.0.1)\n", "Requirement already satisfied: click in /Users/simon/Library/Python/3.9/lib/python/site-packages (from sqlite_utils) (7.1.2)\n", "Requirement already satisfied: tabulate in /usr/local/lib/python3.9/site-packages (from sqlite_utils) (0.8.7)\n", "Requirement already satisfied: python-dateutil in /usr/local/Cellar/jupyterlab/3.0.16_1/libexec/lib/python3.9/site-package (from sqlite-utils) (2.8.1)\n", "Requirement already satisfied: six>=1.5 in /usr/local/Cellar/jupyterlab/3.0.16_1/libexec/lib/python3.9/site-package (from python-dateutil->sqlite-utils) (1.16.0)\n", "\u001b[33mWARNING: You are using pip version 21.1.1; however, version 21.2.2 is available.\n", "You should consider upgrading via the '/usr/local/Cellar/jupyterlab/3.0.16_1/libexec/bin/python3.9 -m pip install --upgrade pip' command.\u001b[0m\n", "Note: you may need to restart the kernel to use updated packages.\n" ] } ], "source": [ "%pip install -U sqlite_utils" ] }, { "cell_type": "code", "execution_count": 2, "id": "050e85a8", "metadata": {}, "outputs": [], "source": [ "import sqlite_utils" ] }, { "cell_type": "markdown", "id": "348bcbfc", "metadata": {}, "source": [ "You can use the library with a database file on disk by running:\n", "\n", " db = sqlite_utils.Database(\"path/to/my/database.db\")\n", "\n", "In this tutorial we will use an in-memory database. This is a quick way to try out new things, though you should note that when you close the notebook the data store in the in-memory database will be lost." ] }, { "cell_type": "code", "execution_count": 3, "id": "4b2aee7e", "metadata": {}, "outputs": [ { "data": { "text/plain": [ ">" ] }, "execution_count": 3, "metadata": {}, "output_type": "execute_result" } ], "source": [ "db = sqlite_utils.Database(memory=True)\n", "db" ] }, { "cell_type": "markdown", "id": "1598ab43", "metadata": {}, "source": [ "## Creating a table\n", "\n", "We are going to create a new table in our database called `creatures` by passing in a Python list of dictionaries.\n", "\n", "`db[name_of_table]` will access a database table object with that name.\n", "\n", "Inserting data into that table will create it if it does not already exist." ] }, { "cell_type": "code", "execution_count": 4, "id": "4a0ac420", "metadata": {}, "outputs": [ { "data": { "text/plain": [ "
" ] }, "execution_count": 4, "metadata": {}, "output_type": "execute_result" } ], "source": [ "db[\"creatures\"].insert_all([{\n", " \"name\": \"Cleo\",\n", " \"species\": \"dog\",\n", " \"age\": 6\n", "}, {\n", " \"name\": \"Lila\",\n", " \"species\": \"chicken\",\n", " \"age\": 0.8,\n", "}, {\n", " \"name\": \"Bants\",\n", " \"species\": \"chicken\",\n", " \"age\": 0.8,\n", "}])" ] }, { "cell_type": "markdown", "id": "049d110b", "metadata": {}, "source": [ "Let's grab a `table` reference to the new creatures table:" ] }, { "cell_type": "code", "execution_count": 5, "id": "8d84ad9c", "metadata": {}, "outputs": [], "source": [ "table = db[\"creatures\"]" ] }, { "cell_type": "markdown", "id": "ffe45750", "metadata": {}, "source": [ "`sqlite-utils` automatically creates a table schema that matches the keys and data types of the dictionaries that were passed to `.insert_all()`.\n", "\n", "We can see that schema using `table.schema`:" ] }, { "cell_type": "code", "execution_count": 6, "id": "136cee1e", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "CREATE TABLE [creatures] (\n", " [name] TEXT,\n", " [species] TEXT,\n", " [age] FLOAT\n", ")\n" ] } ], "source": [ "print(table.schema)" ] }, { "cell_type": "markdown", "id": "9e5c3ae9", "metadata": {}, "source": [ "## Accessing data\n", "\n", "The `table.rows` property lets us loop through the rows in the table, returning each one as a Python dictionary:" ] }, { "cell_type": "code", "execution_count": 7, "id": "f812914d", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "{'name': 'Cleo', 'species': 'dog', 'age': 6.0}\n", "{'name': 'Lila', 'species': 'chicken', 'age': 0.8}\n", "{'name': 'Bants', 'species': 'chicken', 'age': 0.8}\n" ] } ], "source": [ "for row in table.rows:\n", " print(row)" ] }, { "cell_type": "markdown", "id": "60bc6b2c", "metadata": {}, "source": [ "The `db.query(sql)` method can be used to execute SQL queries and return the results as dictionaries:" ] }, { "cell_type": "code", "execution_count": 8, "id": "eaadd85f", "metadata": {}, "outputs": [ { "data": { "text/plain": [ "[{'name': 'Cleo', 'species': 'dog', 'age': 6.0},\n", " {'name': 'Lila', 'species': 'chicken', 'age': 0.8},\n", " {'name': 'Bants', 'species': 'chicken', 'age': 0.8}]" ] }, "execution_count": 8, "metadata": {}, "output_type": "execute_result" } ], "source": [ "list(db.query(\"select * from creatures\"))" ] }, { "cell_type": "markdown", "id": "6614467b", "metadata": {}, "source": [ "Or in a loop:" ] }, { "cell_type": "code", "execution_count": 9, "id": "88fdd52e", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Cleo is a dog\n", "Lila is a chicken\n", "Bants is a chicken\n" ] } ], "source": [ "for row in db.query(\"select name, species from creatures\"):\n", " print(f'{row[\"name\"]} is a {row[\"species\"]}')" ] }, { "cell_type": "markdown", "id": "b81c031c", "metadata": {}, "source": [ "### SQL parameters\n", "\n", "You can run a parameterized query using `?` as placeholders and passing a list of variables. The variables you pass will be correctly quoted, protecting your code from SQL injection vulnerabilities." 
] }, { "cell_type": "code", "execution_count": 10, "id": "267035d9", "metadata": {}, "outputs": [ { "data": { "text/plain": [ "[{'name': 'Cleo', 'species': 'dog', 'age': 6.0}]" ] }, "execution_count": 10, "metadata": {}, "output_type": "execute_result" } ], "source": [ "list(db.query(\"select * from creatures where age > ?\", [1.0]))" ] }, { "cell_type": "markdown", "id": "87cb301b", "metadata": {}, "source": [ "As an alternative to question marks we can use `:name` parameters and feed in the values using a dictionary:" ] }, { "cell_type": "code", "execution_count": 11, "id": "83be9a80", "metadata": {}, "outputs": [ { "data": { "text/plain": [ "[{'name': 'Lila', 'species': 'chicken', 'age': 0.8},\n", " {'name': 'Bants', 'species': 'chicken', 'age': 0.8}]" ] }, "execution_count": 11, "metadata": {}, "output_type": "execute_result" } ], "source": [ "list(db.query(\"select * from creatures where species = :species\", {\"species\": \"chicken\"}))" ] }, { "cell_type": "markdown", "id": "5e5179cc", "metadata": {}, "source": [ "### Primary keys\n", "\n", "When we created this table we did not specify a primary key. SQLite automatically creates a primary key called `rowid` if no other primary key is defined.\n", "\n", "We can run `select rowid, * from creatures` to see this hidden primary key:" ] }, { "cell_type": "code", "execution_count": 12, "id": "c9d963df", "metadata": {}, "outputs": [ { "data": { "text/plain": [ "[{'rowid': 1, 'name': 'Cleo', 'species': 'dog', 'age': 6.0},\n", " {'rowid': 2, 'name': 'Lila', 'species': 'chicken', 'age': 0.8},\n", " {'rowid': 3, 'name': 'Bants', 'species': 'chicken', 'age': 0.8}]" ] }, "execution_count": 12, "metadata": {}, "output_type": "execute_result" } ], "source": [ "list(db.query(\"select rowid, * from creatures\"))" ] }, { "cell_type": "markdown", "id": "0f87cdfb", "metadata": {}, "source": [ "We can also see that using `table.pks_and_rows_where()`:" ] }, { "cell_type": "code", "execution_count": 13, "id": "d365e405", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "1 {'rowid': 1, 'name': 'Cleo', 'species': 'dog', 'age': 6.0}\n", "2 {'rowid': 2, 'name': 'Lila', 'species': 'chicken', 'age': 0.8}\n", "3 {'rowid': 3, 'name': 'Bants', 'species': 'chicken', 'age': 0.8}\n" ] } ], "source": [ "for pk, row in table.pks_and_rows_where():\n", " print(pk, row)" ] }, { "cell_type": "markdown", "id": "5b0e9b74", "metadata": {}, "source": [ "Let's recreate the table with our own primary key, which we will call `id`.\n", "\n", "`table.drop()` drops the table:" ] }, { "cell_type": "code", "execution_count": 14, "id": "568a0e29", "metadata": {}, "outputs": [], "source": [ "table.drop()" ] }, { "cell_type": "code", "execution_count": 15, "id": "13ebd3ab", "metadata": {}, "outputs": [ { "data": { "text/plain": [ "
" ] }, "execution_count": 15, "metadata": {}, "output_type": "execute_result" } ], "source": [ "table" ] }, { "cell_type": "markdown", "id": "522aa6d0", "metadata": {}, "source": [ "We can see a list of tables in the database using `db.tables`:" ] }, { "cell_type": "code", "execution_count": 16, "id": "f3e62678", "metadata": {}, "outputs": [ { "data": { "text/plain": [ "[]" ] }, "execution_count": 16, "metadata": {}, "output_type": "execute_result" } ], "source": [ "db.tables" ] }, { "cell_type": "markdown", "id": "6b80d523", "metadata": {}, "source": [ "We'll create the table again, this time with an `id` column.\n", "\n", "We use `pk=\"id\"` to specify that the `id` column should be treated as the primary key for the table:" ] }, { "cell_type": "code", "execution_count": 17, "id": "c9ee8b9f", "metadata": {}, "outputs": [ { "data": { "text/plain": [ "
" ] }, "execution_count": 17, "metadata": {}, "output_type": "execute_result" } ], "source": [ "db[\"creatures\"].insert_all([{\n", " \"id\": 1,\n", " \"name\": \"Cleo\",\n", " \"species\": \"dog\",\n", " \"age\": 6\n", "}, {\n", " \"id\": 2,\n", " \"name\": \"Lila\",\n", " \"species\": \"chicken\",\n", " \"age\": 0.8,\n", "}, {\n", " \"id\": 3,\n", " \"name\": \"Bants\",\n", " \"species\": \"chicken\",\n", " \"age\": 0.8,\n", "}], pk=\"id\")" ] }, { "cell_type": "code", "execution_count": 18, "id": "523e01ab", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "CREATE TABLE [creatures] (\n", " [id] INTEGER PRIMARY KEY,\n", " [name] TEXT,\n", " [species] TEXT,\n", " [age] FLOAT\n", ")\n" ] } ], "source": [ "print(table.schema)" ] }, { "cell_type": "markdown", "id": "811bea70", "metadata": {}, "source": [ "## Inserting more records\n", "\n", "We can call `.insert_all()` again to insert more records. Let's add two more chickens." ] }, { "cell_type": "code", "execution_count": 19, "id": "716df161", "metadata": {}, "outputs": [ { "data": { "text/plain": [ "
" ] }, "execution_count": 19, "metadata": {}, "output_type": "execute_result" } ], "source": [ "table.insert_all([{\n", " \"id\": 4,\n", " \"name\": \"Azi\",\n", " \"species\": \"chicken\",\n", " \"age\": 0.8,\n", "}, {\n", " \"id\": 5,\n", " \"name\": \"Snowy\",\n", " \"species\": \"chicken\",\n", " \"age\": 0.9,\n", "}], pk=\"id\")" ] }, { "cell_type": "code", "execution_count": 20, "id": "4b1b2476", "metadata": {}, "outputs": [ { "data": { "text/plain": [ "[{'id': 1, 'name': 'Cleo', 'species': 'dog', 'age': 6.0},\n", " {'id': 2, 'name': 'Lila', 'species': 'chicken', 'age': 0.8},\n", " {'id': 3, 'name': 'Bants', 'species': 'chicken', 'age': 0.8},\n", " {'id': 4, 'name': 'Azi', 'species': 'chicken', 'age': 0.8},\n", " {'id': 5, 'name': 'Snowy', 'species': 'chicken', 'age': 0.9}]" ] }, "execution_count": 20, "metadata": {}, "output_type": "execute_result" } ], "source": [ "list(table.rows)" ] }, { "cell_type": "markdown", "id": "2af4ae75", "metadata": {}, "source": [ "Since the `id` column is an integer primary key, we can insert a record without specifying an ID and one will be automatically added.\n", "\n", "Since we are only adding one record we will use `.insert()` instead of `.insert_all()`." ] }, { "cell_type": "code", "execution_count": 21, "id": "246c6dd5", "metadata": {}, "outputs": [ { "data": { "text/plain": [ "
" ] }, "execution_count": 21, "metadata": {}, "output_type": "execute_result" } ], "source": [ "table.insert({\"name\": \"Blue\", \"species\": \"chicken\", \"age\": 0.9})" ] }, { "cell_type": "markdown", "id": "d7c28e4d", "metadata": {}, "source": [ "We can use `table.last_pk` to see the ID of the record we just added." ] }, { "cell_type": "code", "execution_count": 22, "id": "de012e1e", "metadata": {}, "outputs": [ { "data": { "text/plain": [ "6" ] }, "execution_count": 22, "metadata": {}, "output_type": "execute_result" } ], "source": [ "table.last_pk" ] }, { "cell_type": "markdown", "id": "c38edaf4", "metadata": {}, "source": [ "Here's the full list of rows again:" ] }, { "cell_type": "code", "execution_count": 23, "id": "7c27075e", "metadata": {}, "outputs": [ { "data": { "text/plain": [ "[{'id': 1, 'name': 'Cleo', 'species': 'dog', 'age': 6.0},\n", " {'id': 2, 'name': 'Lila', 'species': 'chicken', 'age': 0.8},\n", " {'id': 3, 'name': 'Bants', 'species': 'chicken', 'age': 0.8},\n", " {'id': 4, 'name': 'Azi', 'species': 'chicken', 'age': 0.8},\n", " {'id': 5, 'name': 'Snowy', 'species': 'chicken', 'age': 0.9},\n", " {'id': 6, 'name': 'Blue', 'species': 'chicken', 'age': 0.9}]" ] }, "execution_count": 23, "metadata": {}, "output_type": "execute_result" } ], "source": [ "list(table.rows)" ] }, { "cell_type": "markdown", "id": "64931bd0", "metadata": {}, "source": [ "If you try to add a new record with an existing ID, you will get an `IntegrityError`:" ] }, { "cell_type": "code", "execution_count": 24, "id": "36327794", "metadata": {}, "outputs": [ { "ename": "IntegrityError", "evalue": "UNIQUE constraint failed: creatures.id", "output_type": "error", "traceback": [ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", "\u001b[0;31mIntegrityError\u001b[0m Traceback (most recent call last)", "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mtable\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0minsert\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m{\u001b[0m\u001b[0;34m\"id\"\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0;36m6\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m\"name\"\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0;34m\"Red\"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m\"species\"\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0;34m\"chicken\"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m\"age\"\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0;36m0.9\u001b[0m\u001b[0;34m}\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", "\u001b[0;32m/usr/local/Cellar/jupyterlab/3.0.16_1/libexec/lib/python3.9/site-packages/sqlite_utils/db.py\u001b[0m in \u001b[0;36minsert\u001b[0;34m(self, record, pk, foreign_keys, column_order, not_null, defaults, hash_id, alter, ignore, replace, extracts, conversions, columns)\u001b[0m\n\u001b[1;32m 2027\u001b[0m \u001b[0mcolumns\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mDEFAULT\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2028\u001b[0m ):\n\u001b[0;32m-> 2029\u001b[0;31m return self.insert_all(\n\u001b[0m\u001b[1;32m 2030\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0mrecord\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2031\u001b[0m \u001b[0mpk\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mpk\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", 
"\u001b[0;32m/usr/local/Cellar/jupyterlab/3.0.16_1/libexec/lib/python3.9/site-packages/sqlite_utils/db.py\u001b[0m in \u001b[0;36minsert_all\u001b[0;34m(self, records, pk, foreign_keys, column_order, not_null, defaults, batch_size, hash_id, alter, ignore, replace, truncate, extracts, conversions, columns, upsert)\u001b[0m\n\u001b[1;32m 2143\u001b[0m \u001b[0mfirst\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mFalse\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2144\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 2145\u001b[0;31m self.insert_chunk(\n\u001b[0m\u001b[1;32m 2146\u001b[0m \u001b[0malter\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2147\u001b[0m \u001b[0mextracts\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/usr/local/Cellar/jupyterlab/3.0.16_1/libexec/lib/python3.9/site-packages/sqlite_utils/db.py\u001b[0m in \u001b[0;36minsert_chunk\u001b[0;34m(self, alter, extracts, chunk, all_columns, hash_id, upsert, pk, conversions, num_records_processed, replace, ignore)\u001b[0m\n\u001b[1;32m 1955\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mquery\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mparams\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mqueries_and_params\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1956\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1957\u001b[0;31m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdb\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mexecute\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mquery\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mparams\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1958\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mOperationalError\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1959\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0malter\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0;34m\" column\"\u001b[0m \u001b[0;32min\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/usr/local/Cellar/jupyterlab/3.0.16_1/libexec/lib/python3.9/site-packages/sqlite_utils/db.py\u001b[0m in \u001b[0;36mexecute\u001b[0;34m(self, sql, parameters)\u001b[0m\n\u001b[1;32m 255\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_tracer\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0msql\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mparameters\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 256\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mparameters\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 257\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mconn\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mexecute\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0msql\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mparameters\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 258\u001b[0m 
\u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 259\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mconn\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mexecute\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0msql\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;31mIntegrityError\u001b[0m: UNIQUE constraint failed: creatures.id" ] } ], "source": [ "table.insert({\"id\": 6, \"name\": \"Red\", \"species\": \"chicken\", \"age\": 0.9})" ] }, { "cell_type": "markdown", "id": "2e00692f", "metadata": {}, "source": [ "You can use `replace=True` to replace the matching record with a new one:" ] }, { "cell_type": "code", "execution_count": 25, "id": "2be75589", "metadata": {}, "outputs": [ { "data": { "text/plain": [ "
" ] }, "execution_count": 25, "metadata": {}, "output_type": "execute_result" } ], "source": [ "table.insert({\"id\": 6, \"name\": \"Red\", \"species\": \"chicken\", \"age\": 0.9}, replace=True)" ] }, { "cell_type": "code", "execution_count": 26, "id": "83281675", "metadata": {}, "outputs": [ { "data": { "text/plain": [ "[{'id': 1, 'name': 'Cleo', 'species': 'dog', 'age': 6.0},\n", " {'id': 2, 'name': 'Lila', 'species': 'chicken', 'age': 0.8},\n", " {'id': 3, 'name': 'Bants', 'species': 'chicken', 'age': 0.8},\n", " {'id': 4, 'name': 'Azi', 'species': 'chicken', 'age': 0.8},\n", " {'id': 5, 'name': 'Snowy', 'species': 'chicken', 'age': 0.9},\n", " {'id': 6, 'name': 'Red', 'species': 'chicken', 'age': 0.9}]" ] }, "execution_count": 26, "metadata": {}, "output_type": "execute_result" } ], "source": [ "list(table.rows)" ] }, { "cell_type": "markdown", "id": "d7122b76", "metadata": {}, "source": [ "## Updating a record\n", "\n", "We will rename that row back to `Blue`, this time using the `table.update(pk, updates)` method:" ] }, { "cell_type": "code", "execution_count": 28, "id": "43df156d", "metadata": {}, "outputs": [ { "data": { "text/plain": [ "
" ] }, "execution_count": 28, "metadata": {}, "output_type": "execute_result" } ], "source": [ "table.update(6, {\"name\": \"Blue\"})" ] }, { "cell_type": "code", "execution_count": 32, "id": "0b8f8422", "metadata": {}, "outputs": [ { "data": { "text/plain": [ "[{'id': 6, 'name': 'Blue', 'species': 'chicken', 'age': 0.9}]" ] }, "execution_count": 32, "metadata": {}, "output_type": "execute_result" } ], "source": [ "list(db.query(\"select * from creatures where id = ?\", [6]))" ] }, { "cell_type": "markdown", "id": "58142b86", "metadata": {}, "source": [ "## Extracting one of the columns into another table\n", "\n", "Our current table has a `species` column with a string in it - let's pull that out into a separate table.\n", "\n", "We can do that using the [table.extract() method](https://sqlite-utils.datasette.io/en/stable/python-api.html#extracting-columns-into-a-separate-table)." ] }, { "cell_type": "code", "execution_count": 34, "id": "6ab69111", "metadata": {}, "outputs": [ { "data": { "text/plain": [ "
" ] }, "execution_count": 34, "metadata": {}, "output_type": "execute_result" } ], "source": [ "table.extract(\"species\")" ] }, { "cell_type": "markdown", "id": "dca327b2", "metadata": {}, "source": [ "We now have a new table called `species`, which we can see using the `db.tables` method:" ] }, { "cell_type": "code", "execution_count": 35, "id": "76e95b36", "metadata": {}, "outputs": [ { "data": { "text/plain": [ "[
,
]" ] }, "execution_count": 35, "metadata": {}, "output_type": "execute_result" } ], "source": [ "db.tables" ] }, { "cell_type": "markdown", "id": "5ea43bf5", "metadata": {}, "source": [ "Our creatures table has been modified - instead of a `species` column it now has `species_id` which is a foreign key to the new table:" ] }, { "cell_type": "code", "execution_count": 37, "id": "c0438bff", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "CREATE TABLE \"creatures\" (\n", " [id] INTEGER PRIMARY KEY,\n", " [name] TEXT,\n", " [species_id] INTEGER,\n", " [age] FLOAT,\n", " FOREIGN KEY([species_id]) REFERENCES [species]([id])\n", ")\n", "[{'id': 1, 'name': 'Cleo', 'species_id': 1, 'age': 6.0}, {'id': 2, 'name': 'Lila', 'species_id': 2, 'age': 0.8}, {'id': 3, 'name': 'Bants', 'species_id': 2, 'age': 0.8}, {'id': 4, 'name': 'Azi', 'species_id': 2, 'age': 0.8}, {'id': 5, 'name': 'Snowy', 'species_id': 2, 'age': 0.9}, {'id': 6, 'name': 'Blue', 'species_id': 2, 'age': 0.9}]\n" ] } ], "source": [ "print(db[\"creatures\"].schema)\n", "print(list(db[\"creatures\"].rows))" ] }, { "cell_type": "markdown", "id": "0452c201", "metadata": {}, "source": [ "The new `species` table has been created and populated too:" ] }, { "cell_type": "code", "execution_count": 39, "id": "5d38c3a8", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "CREATE TABLE [species] (\n", " [id] INTEGER PRIMARY KEY,\n", " [species] TEXT\n", ")\n", "[{'id': 1, 'species': 'dog'}, {'id': 2, 'species': 'chicken'}]\n" ] } ], "source": [ "print(db[\"species\"].schema)\n", "print(list(db[\"species\"].rows))" ] }, { "cell_type": "markdown", "id": "a0312d1e", "metadata": {}, "source": [ "We can use a join SQL query to combine data from these two tables:" ] }, { "cell_type": "code", "execution_count": 44, "id": "6734ed5d", "metadata": {}, "outputs": [ { "data": { "text/plain": [ "[{'id': 1, 'name': 'Cleo', 'age': 6.0, 'species_id': 1, 'species': 'dog'},\n", " {'id': 2, 'name': 'Lila', 'age': 0.8, 'species_id': 2, 'species': 'chicken'},\n", " {'id': 3, 'name': 'Bants', 'age': 0.8, 'species_id': 2, 'species': 'chicken'},\n", " {'id': 4, 'name': 'Azi', 'age': 0.8, 'species_id': 2, 'species': 'chicken'},\n", " {'id': 5, 'name': 'Snowy', 'age': 0.9, 'species_id': 2, 'species': 'chicken'},\n", " {'id': 6, 'name': 'Blue', 'age': 0.9, 'species_id': 2, 'species': 'chicken'}]" ] }, "execution_count": 44, "metadata": {}, "output_type": "execute_result" } ], "source": [ "list(db.query(\"\"\"\n", " select\n", " creatures.id,\n", " creatures.name,\n", " creatures.age,\n", " species.id as species_id,\n", " species.species\n", " from creatures\n", " join species on creatures.species_id = species.id\n", "\"\"\"))" ] }, { "cell_type": "code", "execution_count": null, "id": "5c4802ac", "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.9.6" } }, "nbformat": 4, "nbformat_minor": 5 } sqlite-utils-3.35.2/mypy.ini000066400000000000000000000001111452131415600157350ustar00rootroot00000000000000[mypy] [mypy-pysqlite3,sqlean,sqlite_dump] ignore_missing_imports = Truesqlite-utils-3.35.2/setup.cfg000066400000000000000000000001461452131415600160670ustar00rootroot00000000000000[flake8] 
max-line-length = 160 # Black compatibility, E203 whitespace before ':': extend-ignore = E203sqlite-utils-3.35.2/setup.py000066400000000000000000000051421452131415600157610ustar00rootroot00000000000000from setuptools import setup, find_packages import io import os VERSION = "3.35.2" def get_long_description(): with io.open( os.path.join(os.path.dirname(os.path.abspath(__file__)), "README.md"), encoding="utf8", ) as fp: return fp.read() setup( name="sqlite-utils", description="CLI tool and Python library for manipulating SQLite databases", long_description=get_long_description(), long_description_content_type="text/markdown", author="Simon Willison", version=VERSION, license="Apache License, Version 2.0", packages=find_packages(exclude=["tests", "tests.*"]), package_data={"sqlite_utils": ["py.typed"]}, install_requires=[ "sqlite-fts4", "click", "click-default-group>=1.2.3", "tabulate", "python-dateutil", "pluggy", ], extras_require={ "test": ["pytest", "black", "hypothesis", "cogapp"], "docs": [ "furo", "sphinx-autobuild", "codespell", "sphinx-copybutton", "beanbag-docutils>=2.0", "pygments-csv-lexer", ], "mypy": [ "mypy", "types-click", "types-tabulate", "types-python-dateutil", "types-pluggy", "data-science-types", ], "flake8": ["flake8"], "tui": ["trogon"], }, entry_points=""" [console_scripts] sqlite-utils=sqlite_utils.cli:cli """, url="https://github.com/simonw/sqlite-utils", project_urls={ "Documentation": "https://sqlite-utils.datasette.io/en/stable/", "Changelog": "https://sqlite-utils.datasette.io/en/stable/changelog.html", "Source code": "https://github.com/simonw/sqlite-utils", "Issues": "https://github.com/simonw/sqlite-utils/issues", "CI": "https://github.com/simonw/sqlite-utils/actions", }, python_requires=">=3.7", classifiers=[ "Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "Intended Audience :: Science/Research", "Intended Audience :: End Users/Desktop", "Topic :: Database", "License :: OSI Approved :: Apache Software License", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", ], # Needed to bundle py.typed so mypy can see it: zip_safe=False, ) sqlite-utils-3.35.2/sqlite_utils/000077500000000000000000000000001452131415600167665ustar00rootroot00000000000000sqlite-utils-3.35.2/sqlite_utils/__init__.py000066400000000000000000000003111452131415600210720ustar00rootroot00000000000000from .utils import suggest_column_types from .hookspecs import hookimpl from .hookspecs import hookspec from .db import Database __all__ = ["Database", "suggest_column_types", "hookimpl", "hookspec"] sqlite-utils-3.35.2/sqlite_utils/__main__.py000066400000000000000000000000731452131415600210600ustar00rootroot00000000000000from .cli import cli if __name__ == "__main__": cli() sqlite-utils-3.35.2/sqlite_utils/cli.py000066400000000000000000002554331452131415600201230ustar00rootroot00000000000000import base64 import click from click_default_group import DefaultGroup # type: ignore from datetime import datetime import hashlib import pathlib from runpy import run_module import sqlite_utils from sqlite_utils.db import AlterError, BadMultiValues, DescIndex, NoTable from sqlite_utils.plugins import pm, get_plugins from sqlite_utils.utils import maximize_csv_field_size_limit from sqlite_utils import recipes import textwrap import inspect import io import itertools import json import 
os import pdb import sys import csv as csv_std import tabulate from .utils import ( OperationalError, _compile_code, chunks, file_progress, find_spatialite, flatten as _flatten, sqlite3, decode_base64_values, progressbar, rows_from_file, Format, TypeTracker, ) try: import trogon # type: ignore except ImportError: trogon = None CONTEXT_SETTINGS = dict(help_option_names=["-h", "--help"]) VALID_COLUMN_TYPES = ("INTEGER", "TEXT", "FLOAT", "BLOB") UNICODE_ERROR = """ {} The input you provided uses a character encoding other than utf-8. You can fix this by passing the --encoding= option with the encoding of the file. If you do not know the encoding, running 'file filename.csv' may tell you. It's often worth trying: --encoding=latin-1 """.strip() maximize_csv_field_size_limit() def output_options(fn): for decorator in reversed( ( click.option( "--nl", help="Output newline-delimited JSON", is_flag=True, default=False, ), click.option( "--arrays", help="Output rows as arrays instead of objects", is_flag=True, default=False, ), click.option("--csv", is_flag=True, help="Output CSV"), click.option("--tsv", is_flag=True, help="Output TSV"), click.option("--no-headers", is_flag=True, help="Omit CSV headers"), click.option( "-t", "--table", is_flag=True, help="Output as a formatted table" ), click.option( "--fmt", help="Table format - one of {}".format( ", ".join(tabulate.tabulate_formats) ), ), click.option( "--json-cols", help="Detect JSON cols and output them as JSON, not escaped strings", is_flag=True, default=False, ), ) ): fn = decorator(fn) return fn def load_extension_option(fn): return click.option( "--load-extension", multiple=True, help="Path to SQLite extension, with optional :entrypoint", )(fn) @click.group( cls=DefaultGroup, default="query", default_if_no_args=True, context_settings=CONTEXT_SETTINGS, ) @click.version_option() def cli(): "Commands for interacting with a SQLite database" pass if trogon is not None: cli = trogon.tui()(cli) @cli.command() @click.argument( "path", type=click.Path(exists=True, file_okay=True, dir_okay=False, allow_dash=False), required=True, ) @click.option( "--fts4", help="Just show FTS4 enabled tables", default=False, is_flag=True ) @click.option( "--fts5", help="Just show FTS5 enabled tables", default=False, is_flag=True ) @click.option( "--counts", help="Include row counts per table", default=False, is_flag=True ) @output_options @click.option( "--columns", help="Include list of columns for each table", is_flag=True, default=False, ) @click.option( "--schema", help="Include schema for each table", is_flag=True, default=False, ) @load_extension_option def tables( path, fts4, fts5, counts, nl, arrays, csv, tsv, no_headers, table, fmt, json_cols, columns, schema, load_extension, views=False, ): """List the tables in the database Example: \b sqlite-utils tables trees.db """ db = sqlite_utils.Database(path) _load_extensions(db, load_extension) headers = ["view" if views else "table"] if counts: headers.append("count") if columns: headers.append("columns") if schema: headers.append("schema") def _iter(): if views: items = db.view_names() else: items = db.table_names(fts4=fts4, fts5=fts5) for name in items: row = [name] if counts: row.append(db[name].count) if columns: cols = [c.name for c in db[name].columns] if csv: row.append("\n".join(cols)) else: row.append(cols) if schema: row.append(db[name].schema) yield row if table or fmt: print(tabulate.tabulate(_iter(), headers=headers, tablefmt=fmt or "simple")) elif csv or tsv: writer = csv_std.writer(sys.stdout, 
dialect="excel-tab" if tsv else "excel") if not no_headers: writer.writerow(headers) for row in _iter(): writer.writerow(row) else: for line in output_rows(_iter(), headers, nl, arrays, json_cols): click.echo(line) @cli.command() @click.argument( "path", type=click.Path(exists=True, file_okay=True, dir_okay=False, allow_dash=False), required=True, ) @click.option( "--counts", help="Include row counts per view", default=False, is_flag=True ) @output_options @click.option( "--columns", help="Include list of columns for each view", is_flag=True, default=False, ) @click.option( "--schema", help="Include schema for each view", is_flag=True, default=False, ) @load_extension_option def views( path, counts, nl, arrays, csv, tsv, no_headers, table, fmt, json_cols, columns, schema, load_extension, ): """List the views in the database Example: \b sqlite-utils views trees.db """ tables.callback( path=path, fts4=False, fts5=False, counts=counts, nl=nl, arrays=arrays, csv=csv, tsv=tsv, no_headers=no_headers, table=table, fmt=fmt, json_cols=json_cols, columns=columns, schema=schema, load_extension=load_extension, views=True, ) @cli.command() @click.argument( "path", type=click.Path(exists=True, file_okay=True, dir_okay=False, allow_dash=False), required=True, ) @click.argument("tables", nargs=-1) @click.option("--no-vacuum", help="Don't run VACUUM", default=False, is_flag=True) @load_extension_option def optimize(path, tables, no_vacuum, load_extension): """Optimize all full-text search tables and then run VACUUM - should shrink the database file Example: \b sqlite-utils optimize chickens.db """ db = sqlite_utils.Database(path) _load_extensions(db, load_extension) if not tables: tables = db.table_names(fts4=True) + db.table_names(fts5=True) with db.conn: for table in tables: db[table].optimize() if not no_vacuum: db.vacuum() @cli.command(name="rebuild-fts") @click.argument( "path", type=click.Path(exists=True, file_okay=True, dir_okay=False, allow_dash=False), required=True, ) @click.argument("tables", nargs=-1) @load_extension_option def rebuild_fts(path, tables, load_extension): """Rebuild all or specific full-text search tables Example: \b sqlite-utils rebuild-fts chickens.db chickens """ db = sqlite_utils.Database(path) _load_extensions(db, load_extension) if not tables: tables = db.table_names(fts4=True) + db.table_names(fts5=True) with db.conn: for table in tables: db[table].rebuild_fts() @cli.command() @click.argument( "path", type=click.Path(exists=True, file_okay=True, dir_okay=False, allow_dash=False), required=True, ) @click.argument("names", nargs=-1) def analyze(path, names): """Run ANALYZE against the whole database, or against specific named indexes and tables Example: \b sqlite-utils analyze chickens.db """ db = sqlite_utils.Database(path) try: if names: for name in names: db.analyze(name) else: db.analyze() except OperationalError as e: raise click.ClickException(e) @cli.command() @click.argument( "path", type=click.Path(exists=True, file_okay=True, dir_okay=False, allow_dash=False), required=True, ) def vacuum(path): """Run VACUUM against the database Example: \b sqlite-utils vacuum chickens.db """ sqlite_utils.Database(path).vacuum() @cli.command() @click.argument( "path", type=click.Path(exists=True, file_okay=True, dir_okay=False, allow_dash=False), required=True, ) @load_extension_option def dump(path, load_extension): """Output a SQL dump of the schema and full contents of the database Example: \b sqlite-utils dump chickens.db """ db = sqlite_utils.Database(path) 
_load_extensions(db, load_extension) for line in db.iterdump(): click.echo(line) @cli.command(name="add-column") @click.argument( "path", type=click.Path(exists=True, file_okay=True, dir_okay=False, allow_dash=False), required=True, ) @click.argument("table") @click.argument("col_name") @click.argument( "col_type", type=click.Choice( ["integer", "float", "blob", "text", "INTEGER", "FLOAT", "BLOB", "TEXT"] ), required=False, ) @click.option( "--fk", type=str, required=False, help="Table to reference as a foreign key" ) @click.option( "--fk-col", type=str, required=False, help="Referenced column on that foreign key table - if omitted will automatically use the primary key", ) @click.option( "--not-null-default", type=str, required=False, help="Add NOT NULL DEFAULT 'TEXT' constraint", ) @click.option( "--ignore", is_flag=True, help="If column already exists, do nothing", ) @load_extension_option def add_column( path, table, col_name, col_type, fk, fk_col, not_null_default, ignore, load_extension, ): """Add a column to the specified table Example: \b sqlite-utils add-column chickens.db chickens weight float """ db = sqlite_utils.Database(path) _load_extensions(db, load_extension) try: db[table].add_column( col_name, col_type, fk=fk, fk_col=fk_col, not_null_default=not_null_default ) except OperationalError as ex: if not ignore: raise click.ClickException(str(ex)) @cli.command(name="add-foreign-key") @click.argument( "path", type=click.Path(exists=True, file_okay=True, dir_okay=False, allow_dash=False), required=True, ) @click.argument("table") @click.argument("column") @click.argument("other_table", required=False) @click.argument("other_column", required=False) @click.option( "--ignore", is_flag=True, help="If foreign key already exists, do nothing", ) @load_extension_option def add_foreign_key( path, table, column, other_table, other_column, ignore, load_extension ): """ Add a new foreign key constraint to an existing table Example: sqlite-utils add-foreign-key my.db books author_id authors id """ db = sqlite_utils.Database(path) _load_extensions(db, load_extension) try: db[table].add_foreign_key(column, other_table, other_column, ignore=ignore) except AlterError as e: raise click.ClickException(e) @cli.command(name="add-foreign-keys") @click.argument( "path", type=click.Path(exists=True, file_okay=True, dir_okay=False, allow_dash=False), required=True, ) @click.argument("foreign_key", nargs=-1) @load_extension_option def add_foreign_keys(path, foreign_key, load_extension): """ Add multiple new foreign key constraints to a database Example: \b sqlite-utils add-foreign-keys my.db \\ books author_id authors id \\ authors country_id countries id """ db = sqlite_utils.Database(path) _load_extensions(db, load_extension) if len(foreign_key) % 4 != 0: raise click.ClickException( "Each foreign key requires four values: table, column, other_table, other_column" ) tuples = [] for i in range(len(foreign_key) // 4): tuples.append(tuple(foreign_key[i * 4 : (i * 4) + 4])) try: db.add_foreign_keys(tuples) except AlterError as e: raise click.ClickException(e) @cli.command(name="index-foreign-keys") @click.argument( "path", type=click.Path(exists=True, file_okay=True, dir_okay=False, allow_dash=False), required=True, ) @load_extension_option def index_foreign_keys(path, load_extension): """ Ensure every foreign key column has an index on it Example: \b sqlite-utils index-foreign-keys chickens.db """ db = sqlite_utils.Database(path) _load_extensions(db, load_extension) db.index_foreign_keys() 
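# Usage sketch (illustrative comment, not part of the shipped module): the
# "index-foreign-keys" command defined above is a thin wrapper around the
# Python API method Database.index_foreign_keys(), which it calls directly.
# Assuming a database file named "chickens.db", the equivalent library call
# looks roughly like this:
#
#     import sqlite_utils
#
#     db = sqlite_utils.Database("chickens.db")
#     # Adds an index for every foreign key column that is not already indexed
#     db.index_foreign_keys()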
@cli.command(name="create-index") @click.argument( "path", type=click.Path(exists=True, file_okay=True, dir_okay=False, allow_dash=False), required=True, ) @click.argument("table") @click.argument("column", nargs=-1, required=True) @click.option("--name", help="Explicit name for the new index") @click.option("--unique", help="Make this a unique index", default=False, is_flag=True) @click.option( "--if-not-exists", "--ignore", help="Ignore if index already exists", default=False, is_flag=True, ) @click.option( "--analyze", help="Run ANALYZE after creating the index", is_flag=True, ) @load_extension_option def create_index( path, table, column, name, unique, if_not_exists, analyze, load_extension ): """ Add an index to the specified table for the specified columns Example: \b sqlite-utils create-index chickens.db chickens name To create an index in descending order: \b sqlite-utils create-index chickens.db chickens -- -name """ db = sqlite_utils.Database(path) _load_extensions(db, load_extension) # Treat -prefix as descending for columns columns = [] for col in column: if col.startswith("-"): col = DescIndex(col[1:]) columns.append(col) db[table].create_index( columns, index_name=name, unique=unique, if_not_exists=if_not_exists, analyze=analyze, ) @cli.command(name="enable-fts") @click.argument( "path", type=click.Path(exists=True, file_okay=True, dir_okay=False, allow_dash=False), required=True, ) @click.argument("table") @click.argument("column", nargs=-1, required=True) @click.option("--fts4", help="Use FTS4", default=False, is_flag=True) @click.option("--fts5", help="Use FTS5", default=False, is_flag=True) @click.option("--tokenize", help="Tokenizer to use, e.g. porter") @click.option( "--create-triggers", help="Create triggers to update the FTS tables when the parent table changes.", default=False, is_flag=True, ) @click.option( "--replace", is_flag=True, help="Replace existing FTS configuration if it exists", ) @load_extension_option def enable_fts( path, table, column, fts4, fts5, tokenize, create_triggers, replace, load_extension ): """Enable full-text search for specific table and columns" Example: \b sqlite-utils enable-fts chickens.db chickens name """ fts_version = "FTS5" if fts4 and fts5: click.echo("Can only use one of --fts4 or --fts5", err=True) return elif fts4: fts_version = "FTS4" db = sqlite_utils.Database(path) _load_extensions(db, load_extension) try: db[table].enable_fts( column, fts_version=fts_version, tokenize=tokenize, create_triggers=create_triggers, replace=replace, ) except OperationalError as ex: raise click.ClickException(ex) @cli.command(name="populate-fts") @click.argument( "path", type=click.Path(exists=True, file_okay=True, dir_okay=False, allow_dash=False), required=True, ) @click.argument("table") @click.argument("column", nargs=-1, required=True) @load_extension_option def populate_fts(path, table, column, load_extension): """Re-populate full-text search for specific table and columns Example: \b sqlite-utils populate-fts chickens.db chickens name """ db = sqlite_utils.Database(path) _load_extensions(db, load_extension) db[table].populate_fts(column) @cli.command(name="disable-fts") @click.argument( "path", type=click.Path(exists=True, file_okay=True, dir_okay=False, allow_dash=False), required=True, ) @click.argument("table") @load_extension_option def disable_fts(path, table, load_extension): """Disable full-text search for specific table Example: \b sqlite-utils disable-fts chickens.db chickens """ db = sqlite_utils.Database(path) 
_load_extensions(db, load_extension) db[table].disable_fts() @cli.command(name="enable-wal") @click.argument( "path", nargs=-1, type=click.Path(exists=True, file_okay=True, dir_okay=False, allow_dash=False), required=True, ) @load_extension_option def enable_wal(path, load_extension): """Enable WAL for database files Example: \b sqlite-utils enable-wal chickens.db """ for path_ in path: db = sqlite_utils.Database(path_) _load_extensions(db, load_extension) db.enable_wal() @cli.command(name="disable-wal") @click.argument( "path", nargs=-1, type=click.Path(exists=True, file_okay=True, dir_okay=False, allow_dash=False), required=True, ) @load_extension_option def disable_wal(path, load_extension): """Disable WAL for database files Example: \b sqlite-utils disable-wal chickens.db """ for path_ in path: db = sqlite_utils.Database(path_) _load_extensions(db, load_extension) db.disable_wal() @cli.command(name="enable-counts") @click.argument( "path", type=click.Path(exists=True, file_okay=True, dir_okay=False, allow_dash=False), required=True, ) @click.argument("tables", nargs=-1) @load_extension_option def enable_counts(path, tables, load_extension): """Configure triggers to update a _counts table with row counts Example: \b sqlite-utils enable-counts chickens.db """ db = sqlite_utils.Database(path) _load_extensions(db, load_extension) if not tables: db.enable_counts() else: # Check all tables exist bad_tables = [table for table in tables if not db[table].exists()] if bad_tables: raise click.ClickException("Invalid tables: {}".format(bad_tables)) for table in tables: db[table].enable_counts() @cli.command(name="reset-counts") @click.argument( "path", type=click.Path(exists=True, file_okay=True, dir_okay=False, allow_dash=False), required=True, ) @load_extension_option def reset_counts(path, load_extension): """Reset calculated counts in the _counts table Example: \b sqlite-utils reset-counts chickens.db """ db = sqlite_utils.Database(path) _load_extensions(db, load_extension) db.reset_counts() _import_options = ( click.option( "--flatten", is_flag=True, help='Flatten nested JSON objects, so {"a": {"b": 1}} becomes {"a_b": 1}', ), click.option("--nl", is_flag=True, help="Expect newline-delimited JSON"), click.option("-c", "--csv", is_flag=True, help="Expect CSV input"), click.option("--tsv", is_flag=True, help="Expect TSV input"), click.option("--empty-null", is_flag=True, help="Treat empty strings as NULL"), click.option( "--lines", is_flag=True, help="Treat each line as a single value called 'line'", ), click.option( "--text", is_flag=True, help="Treat input as a single value called 'text'", ), click.option("--convert", help="Python code to convert each item"), click.option( "--import", "imports", type=str, multiple=True, help="Python modules to import", ), click.option("--delimiter", help="Delimiter to use for CSV files"), click.option("--quotechar", help="Quote character to use for CSV/TSV"), click.option("--sniff", is_flag=True, help="Detect delimiter and quote character"), click.option("--no-headers", is_flag=True, help="CSV file has no header row"), click.option( "--encoding", help="Character encoding for input, defaults to utf-8", ), ) def import_options(fn): for decorator in reversed(_import_options): fn = decorator(fn) return fn def insert_upsert_options(*, require_pk=False): def inner(fn): for decorator in reversed( ( click.argument( "path", type=click.Path(file_okay=True, dir_okay=False, allow_dash=False), required=True, ), click.argument("table"), click.argument("file", 
type=click.File("rb"), required=True), click.option( "--pk", help="Columns to use as the primary key, e.g. id", multiple=True, required=require_pk, ), ) + _import_options + ( click.option( "--batch-size", type=int, default=100, help="Commit every X records" ), click.option("--stop-after", type=int, help="Stop after X records"), click.option( "--alter", is_flag=True, help="Alter existing table to add any missing columns", ), click.option( "--not-null", multiple=True, help="Columns that should be created as NOT NULL", ), click.option( "--default", multiple=True, type=(str, str), help="Default value that should be set for a column", ), click.option( "-d", "--detect-types", is_flag=True, envvar="SQLITE_UTILS_DETECT_TYPES", help="Detect types for columns in CSV/TSV data", ), click.option( "--analyze", is_flag=True, help="Run ANALYZE at the end of this operation", ), load_extension_option, click.option("--silent", is_flag=True, help="Do not show progress bar"), ) ): fn = decorator(fn) return fn return inner def insert_upsert_implementation( path, table, file, pk, flatten, nl, csv, tsv, empty_null, lines, text, convert, imports, delimiter, quotechar, sniff, no_headers, encoding, batch_size, stop_after, alter, upsert, ignore=False, replace=False, truncate=False, not_null=None, default=None, detect_types=None, analyze=False, load_extension=None, silent=False, bulk_sql=None, functions=None, ): db = sqlite_utils.Database(path) _load_extensions(db, load_extension) if functions: _register_functions(db, functions) if (delimiter or quotechar or sniff or no_headers) and not tsv: csv = True if (nl + csv + tsv) >= 2: raise click.ClickException("Use just one of --nl, --csv or --tsv") if (csv or tsv) and flatten: raise click.ClickException("--flatten cannot be used with --csv or --tsv") if empty_null and not (csv or tsv): raise click.ClickException("--empty-null can only be used with --csv or --tsv") if encoding and not (csv or tsv): raise click.ClickException("--encoding must be used with --csv or --tsv") if pk and len(pk) == 1: pk = pk[0] encoding = encoding or "utf-8-sig" # The --sniff option needs us to buffer the file to peek ahead sniff_buffer = None decoded_buffer = None if sniff: sniff_buffer = io.BufferedReader(file, buffer_size=4096) decoded_buffer = io.TextIOWrapper(sniff_buffer, encoding=encoding) else: decoded_buffer = io.TextIOWrapper(file, encoding=encoding) tracker = None with file_progress(decoded_buffer, silent=silent) as decoded: if csv or tsv: if sniff: # Read first 2048 bytes and use that to detect first_bytes = sniff_buffer.peek(2048) dialect = csv_std.Sniffer().sniff( first_bytes.decode(encoding, "ignore") ) else: dialect = "excel-tab" if tsv else "excel" csv_reader_args = {"dialect": dialect} if delimiter: csv_reader_args["delimiter"] = delimiter if quotechar: csv_reader_args["quotechar"] = quotechar reader = csv_std.reader(decoded, **csv_reader_args) first_row = next(reader) if no_headers: headers = ["untitled_{}".format(i + 1) for i in range(len(first_row))] reader = itertools.chain([first_row], reader) else: headers = first_row if empty_null: docs = ( dict(zip(headers, [None if cell == "" else cell for cell in row])) for row in reader ) else: docs = (dict(zip(headers, row)) for row in reader) if detect_types: tracker = TypeTracker() docs = tracker.wrap(docs) elif lines: docs = ({"line": line.strip()} for line in decoded) elif text: docs = ({"text": decoded.read()},) else: try: if nl: docs = (json.loads(line) for line in decoded if line.strip()) else: docs = json.load(decoded) if 
isinstance(docs, dict): docs = [docs] except json.decoder.JSONDecodeError as ex: raise click.ClickException( "Invalid JSON - use --csv for CSV or --tsv for TSV files\n\nJSON error: {}".format( ex ) ) if flatten: docs = (_flatten(doc) for doc in docs) if stop_after: docs = itertools.islice(docs, stop_after) if convert: variable = "row" if lines: variable = "line" elif text: variable = "text" fn = _compile_code(convert, imports, variable=variable) if lines: docs = (fn(doc["line"]) for doc in docs) elif text: # Special case: this is allowed to be an iterable text_value = list(docs)[0]["text"] fn_return = fn(text_value) if isinstance(fn_return, dict): docs = [fn_return] else: try: docs = iter(fn_return) except TypeError: raise click.ClickException( "--convert must return dict or iterator" ) else: docs = (fn(doc) or doc for doc in docs) extra_kwargs = { "ignore": ignore, "replace": replace, "truncate": truncate, "analyze": analyze, } if not_null: extra_kwargs["not_null"] = set(not_null) if default: extra_kwargs["defaults"] = dict(default) if upsert: extra_kwargs["upsert"] = upsert # docs should all be dictionaries docs = (verify_is_dict(doc) for doc in docs) # Apply {"$base64": true, ...} decoding, if needed docs = (decode_base64_values(doc) for doc in docs) # For bulk_sql= we use cursor.executemany() instead if bulk_sql: if batch_size: doc_chunks = chunks(docs, batch_size) else: doc_chunks = [docs] for doc_chunk in doc_chunks: with db.conn: db.conn.cursor().executemany(bulk_sql, doc_chunk) return try: db[table].insert_all( docs, pk=pk, batch_size=batch_size, alter=alter, **extra_kwargs ) except Exception as e: if ( isinstance(e, OperationalError) and e.args and "has no column named" in e.args[0] ): raise click.ClickException( "{}\n\nTry using --alter to add additional columns".format( e.args[0] ) ) # If we can find sql= and parameters= arguments, show those variables = _find_variables(e.__traceback__, ["sql", "parameters"]) if "sql" in variables and "parameters" in variables: raise click.ClickException( "{}\n\nsql = {}\nparameters = {}".format( str(e), variables["sql"], variables["parameters"] ) ) else: raise if tracker is not None: db[table].transform(types=tracker.types) # Clean up open file-like objects if sniff_buffer: sniff_buffer.close() if decoded_buffer: decoded_buffer.close() def _find_variables(tb, vars): to_find = list(vars) found = {} for var in to_find: if var in tb.tb_frame.f_locals: vars.remove(var) found[var] = tb.tb_frame.f_locals[var] if vars and tb.tb_next: found.update(_find_variables(tb.tb_next, vars)) return found @cli.command() @insert_upsert_options() @click.option( "--ignore", is_flag=True, default=False, help="Ignore records if pk already exists" ) @click.option( "--replace", is_flag=True, default=False, help="Replace records if pk already exists", ) @click.option( "--truncate", is_flag=True, default=False, help="Truncate table before inserting records, if table already exists", ) def insert( path, table, file, pk, flatten, nl, csv, tsv, empty_null, lines, text, convert, imports, delimiter, quotechar, sniff, no_headers, encoding, batch_size, stop_after, alter, detect_types, analyze, load_extension, silent, ignore, replace, truncate, not_null, default, ): """ Insert records from FILE into a table, creating the table if it does not already exist. Example: echo '{"name": "Lila"}' | sqlite-utils insert data.db chickens - By default the input is expected to be a JSON object or array of objects. 
\b - Use --nl for newline-delimited JSON objects - Use --csv or --tsv for comma-separated or tab-separated input - Use --lines to write each incoming line to a column called "line" - Use --text to write the entire input to a column called "text" You can also use --convert to pass a fragment of Python code that will be used to convert each input. Your Python code will be passed a "row" variable representing the imported row, and can return a modified row. This example uses just the name, latitude and longitude columns from a CSV file, converting name to upper case and latitude and longitude to floating point numbers: \b sqlite-utils insert plants.db plants plants.csv --csv --convert ' return { "name": row["name"].upper(), "latitude": float(row["latitude"]), "longitude": float(row["longitude"]), }' If you are using --lines your code will be passed a "line" variable, and for --text a "text" variable. When using --text your function can return an iterator of rows to insert. This example inserts one record per word in the input: \b echo 'A bunch of words' | sqlite-utils insert words.db words - \\ --text --convert '({"word": w} for w in text.split())' """ try: insert_upsert_implementation( path, table, file, pk, flatten, nl, csv, tsv, empty_null, lines, text, convert, imports, delimiter, quotechar, sniff, no_headers, encoding, batch_size, stop_after, alter=alter, upsert=False, ignore=ignore, replace=replace, truncate=truncate, detect_types=detect_types, analyze=analyze, load_extension=load_extension, silent=silent, not_null=not_null, default=default, ) except UnicodeDecodeError as ex: raise click.ClickException(UNICODE_ERROR.format(ex)) @cli.command() @insert_upsert_options(require_pk=True) def upsert( path, table, file, pk, flatten, nl, csv, tsv, empty_null, lines, text, convert, imports, batch_size, stop_after, delimiter, quotechar, sniff, no_headers, encoding, alter, not_null, default, detect_types, analyze, load_extension, silent, ): """ Upsert records based on their primary key. Works like 'insert' but if an incoming record has a primary key that matches an existing record the existing record will be updated. Example: \b echo '[ {"id": 1, "name": "Lila"}, {"id": 2, "name": "Suna"} ]' | sqlite-utils upsert data.db chickens - --pk id """ try: insert_upsert_implementation( path, table, file, pk, flatten, nl, csv, tsv, empty_null, lines, text, convert, imports, delimiter, quotechar, sniff, no_headers, encoding, batch_size, stop_after, alter=alter, upsert=True, not_null=not_null, default=default, detect_types=detect_types, analyze=analyze, load_extension=load_extension, silent=silent, ) except UnicodeDecodeError as ex: raise click.ClickException(UNICODE_ERROR.format(ex)) @cli.command() @click.argument( "path", type=click.Path(file_okay=True, dir_okay=False, allow_dash=False), required=True, ) @click.argument("sql") @click.argument("file", type=click.File("rb"), required=True) @click.option("--batch-size", type=int, default=100, help="Commit every X records") @click.option( "--functions", help="Python code defining one or more custom SQL functions" ) @import_options @load_extension_option def bulk( path, sql, file, batch_size, functions, flatten, nl, csv, tsv, empty_null, lines, text, convert, imports, delimiter, quotechar, sniff, no_headers, encoding, load_extension, ): """ Execute parameterized SQL against the provided list of documents. 
Example: \b echo '[ {"id": 1, "name": "Lila2"}, {"id": 2, "name": "Suna2"} ]' | sqlite-utils bulk data.db ' update chickens set name = :name where id = :id ' - """ try: insert_upsert_implementation( path=path, table=None, file=file, pk=None, flatten=flatten, nl=nl, csv=csv, tsv=tsv, empty_null=empty_null, lines=lines, text=text, convert=convert, imports=imports, delimiter=delimiter, quotechar=quotechar, sniff=sniff, no_headers=no_headers, encoding=encoding, batch_size=batch_size, stop_after=None, alter=False, upsert=False, not_null=set(), default={}, detect_types=False, load_extension=load_extension, silent=False, bulk_sql=sql, functions=functions, ) except (OperationalError, sqlite3.IntegrityError) as e: raise click.ClickException(str(e)) @cli.command(name="create-database") @click.argument( "path", type=click.Path(file_okay=True, dir_okay=False, allow_dash=False), required=True, ) @click.option( "--enable-wal", is_flag=True, help="Enable WAL mode on the created database" ) @click.option( "--init-spatialite", is_flag=True, help="Enable SpatiaLite on the created database" ) @load_extension_option def create_database(path, enable_wal, init_spatialite, load_extension): """Create a new empty database file Example: \b sqlite-utils create-database trees.db """ db = sqlite_utils.Database(path) if enable_wal: db.enable_wal() # load spatialite or another extension from a custom location if load_extension: _load_extensions(db, load_extension) # load spatialite from expected locations and initialize metadata if init_spatialite: db.init_spatialite() db.vacuum() @cli.command(name="create-table") @click.argument( "path", type=click.Path(file_okay=True, dir_okay=False, allow_dash=False), required=True, ) @click.argument("table") @click.argument("columns", nargs=-1, required=True) @click.option("--pk", help="Column to use as primary key") @click.option( "--not-null", multiple=True, help="Columns that should be created as NOT NULL", ) @click.option( "--default", multiple=True, type=(str, str), help="Default value that should be set for a column", ) @click.option( "--fk", multiple=True, type=(str, str, str), help="Column, other table, other column to set as a foreign key", ) @click.option( "--ignore", is_flag=True, help="If table already exists, do nothing", ) @click.option( "--replace", is_flag=True, help="If table already exists, replace it", ) @click.option( "--transform", is_flag=True, help="If table already exists, try to transform the schema", ) @load_extension_option def create_table( path, table, columns, pk, not_null, default, fk, ignore, replace, transform, load_extension, ): """ Add a table with the specified columns. Columns should be specified using name, type pairs, for example: \b sqlite-utils create-table my.db people \\ id integer \\ name text \\ height float \\ photo blob --pk id Valid column types are text, integer, float and blob. """ db = sqlite_utils.Database(path) _load_extensions(db, load_extension) if len(columns) % 2 == 1: raise click.ClickException( "columns must be an even number of 'name' 'type' pairs" ) coltypes = {} columns = list(columns) while columns: name = columns.pop(0) ctype = columns.pop(0) if ctype.upper() not in VALID_COLUMN_TYPES: raise click.ClickException( "column types must be one of {}".format(VALID_COLUMN_TYPES) ) coltypes[name] = ctype.upper() # Does table already exist? if table in db.table_names(): if not ignore and not replace and not transform: raise click.ClickException( 'Table "{}" already exists. 
Use --replace to delete and replace it.'.format( table ) ) db[table].create( coltypes, pk=pk, not_null=not_null, defaults=dict(default), foreign_keys=fk, ignore=ignore, replace=replace, transform=transform, ) @cli.command(name="duplicate") @click.argument( "path", type=click.Path(file_okay=True, dir_okay=False, allow_dash=False), required=True, ) @click.argument("table") @click.argument("new_table") @click.option("--ignore", is_flag=True, help="If table does not exist, do nothing") @load_extension_option def duplicate(path, table, new_table, ignore, load_extension): """ Create a duplicate of this table, copying across the schema and all row data. """ db = sqlite_utils.Database(path) _load_extensions(db, load_extension) try: db[table].duplicate(new_table) except NoTable: if not ignore: raise click.ClickException('Table "{}" does not exist'.format(table)) @cli.command(name="rename-table") @click.argument( "path", type=click.Path(file_okay=True, dir_okay=False, allow_dash=False), required=True, ) @click.argument("table") @click.argument("new_name") @click.option("--ignore", is_flag=True, help="If table does not exist, do nothing") @load_extension_option def rename_table(path, table, new_name, ignore, load_extension): """ Rename this table. """ db = sqlite_utils.Database(path) _load_extensions(db, load_extension) try: db.rename_table(table, new_name) except sqlite3.OperationalError as ex: if not ignore: raise click.ClickException( 'Table "{}" could not be renamed. {}'.format(table, str(ex)) ) @cli.command(name="drop-table") @click.argument( "path", type=click.Path(file_okay=True, dir_okay=False, allow_dash=False), required=True, ) @click.argument("table") @click.option("--ignore", is_flag=True, help="If table does not exist, do nothing") @load_extension_option def drop_table(path, table, ignore, load_extension): """Drop the specified table Example: \b sqlite-utils drop-table chickens.db chickens """ db = sqlite_utils.Database(path) _load_extensions(db, load_extension) try: db[table].drop(ignore=ignore) except OperationalError: raise click.ClickException('Table "{}" does not exist'.format(table)) @cli.command(name="create-view") @click.argument( "path", type=click.Path(file_okay=True, dir_okay=False, allow_dash=False), required=True, ) @click.argument("view") @click.argument("select") @click.option( "--ignore", is_flag=True, help="If view already exists, do nothing", ) @click.option( "--replace", is_flag=True, help="If view already exists, replace it", ) @load_extension_option def create_view(path, view, select, ignore, replace, load_extension): """Create a view for the provided SELECT query Example: \b sqlite-utils create-view chickens.db heavy_chickens \\ 'select * from chickens where weight > 3' """ db = sqlite_utils.Database(path) _load_extensions(db, load_extension) # Does view already exist? if view in db.view_names(): if ignore: return elif replace: db[view].drop() else: raise click.ClickException( 'View "{}" already exists. 
Use --replace to delete and replace it.'.format( view ) ) db.create_view(view, select) @cli.command(name="drop-view") @click.argument( "path", type=click.Path(file_okay=True, dir_okay=False, allow_dash=False), required=True, ) @click.argument("view") @click.option("--ignore", is_flag=True, help="If view does not exist, do nothing") @load_extension_option def drop_view(path, view, ignore, load_extension): """Drop the specified view Example: \b sqlite-utils drop-view chickens.db heavy_chickens """ db = sqlite_utils.Database(path) _load_extensions(db, load_extension) try: db[view].drop(ignore=ignore) except OperationalError: raise click.ClickException('View "{}" does not exist'.format(view)) @cli.command() @click.argument( "path", type=click.Path(file_okay=True, dir_okay=False, allow_dash=False), required=True, ) @click.argument("sql") @click.option( "--attach", type=(str, click.Path(file_okay=True, dir_okay=False, allow_dash=False)), multiple=True, help="Additional databases to attach - specify alias and filepath", ) @output_options @click.option("-r", "--raw", is_flag=True, help="Raw output, first column of first row") @click.option("--raw-lines", is_flag=True, help="Raw output, first column of each row") @click.option( "-p", "--param", multiple=True, type=(str, str), help="Named :parameters for SQL query", ) @click.option( "--functions", help="Python code defining one or more custom SQL functions" ) @load_extension_option def query( path, sql, attach, nl, arrays, csv, tsv, no_headers, table, fmt, json_cols, raw, raw_lines, param, load_extension, functions, ): """Execute SQL query and return the results as JSON Example: \b sqlite-utils data.db \\ "select * from chickens where age > :age" \\ -p age 1 """ db = sqlite_utils.Database(path) for alias, attach_path in attach: db.attach(alias, attach_path) _load_extensions(db, load_extension) db.register_fts4_bm25() if functions: _register_functions(db, functions) _execute_query( db, sql, param, raw, raw_lines, table, csv, tsv, no_headers, fmt, nl, arrays, json_cols, ) @cli.command() @click.argument( "paths", type=click.Path(file_okay=True, dir_okay=False, allow_dash=True), required=False, nargs=-1, ) @click.argument("sql") @click.option( "--functions", help="Python code defining one or more custom SQL functions" ) @click.option( "--attach", type=(str, click.Path(file_okay=True, dir_okay=False, allow_dash=False)), multiple=True, help="Additional databases to attach - specify alias and filepath", ) @click.option( "--flatten", is_flag=True, help='Flatten nested JSON objects, so {"foo": {"bar": 1}} becomes {"foo_bar": 1}', ) @output_options @click.option("-r", "--raw", is_flag=True, help="Raw output, first column of first row") @click.option("--raw-lines", is_flag=True, help="Raw output, first column of each row") @click.option( "-p", "--param", multiple=True, type=(str, str), help="Named :parameters for SQL query", ) @click.option( "--encoding", help="Character encoding for CSV input, defaults to utf-8", ) @click.option( "-n", "--no-detect-types", is_flag=True, help="Treat all CSV/TSV columns as TEXT", ) @click.option("--schema", is_flag=True, help="Show SQL schema for in-memory database") @click.option("--dump", is_flag=True, help="Dump SQL for in-memory database") @click.option( "--save", type=click.Path(file_okay=True, dir_okay=False, allow_dash=False), help="Save in-memory database to this file", ) @click.option( "--analyze", is_flag=True, help="Analyze resulting tables and output results", ) @load_extension_option def memory( paths, sql, 
functions, attach, flatten, nl, arrays, csv, tsv, no_headers, table, fmt, json_cols, raw, raw_lines, param, encoding, no_detect_types, schema, dump, save, analyze, load_extension, ): """Execute SQL query against an in-memory database, optionally populated by imported data To import data from CSV, TSV or JSON files pass them on the command-line: \b sqlite-utils memory one.csv two.json \\ "select * from one join two on one.two_id = two.id" For data piped into the tool from standard input, use "-" or "stdin": \b cat animals.csv | sqlite-utils memory - \\ "select * from stdin where species = 'dog'" The format of the data will be automatically detected. You can specify the format explicitly using :json, :csv, :tsv or :nl (for newline-delimited JSON) - for example: \b cat animals.csv | sqlite-utils memory stdin:csv places.dat:nl \\ "select * from stdin where place_id in (select id from places)" Use --schema to view the SQL schema of any imported files: \b sqlite-utils memory animals.csv --schema """ db = sqlite_utils.Database(memory=True) # If --dump or --save or --analyze used but no paths detected, assume SQL query is a path: if (dump or save or schema or analyze) and not paths: paths = [sql] sql = None stem_counts = {} for i, path in enumerate(paths): # Path may have a :format suffix fp = None if ":" in path and path.rsplit(":", 1)[-1].upper() in Format.__members__: path, suffix = path.rsplit(":", 1) format = Format[suffix.upper()] else: format = None if path in ("-", "stdin"): fp = sys.stdin.buffer file_table = "stdin" else: file_path = pathlib.Path(path) stem = file_path.stem if stem_counts.get(stem): file_table = "{}_{}".format(stem, stem_counts[stem]) else: file_table = stem stem_counts[stem] = stem_counts.get(stem, 1) + 1 fp = file_path.open("rb") rows, format_used = rows_from_file(fp, format=format, encoding=encoding) tracker = None if format_used in (Format.CSV, Format.TSV) and not no_detect_types: tracker = TypeTracker() rows = tracker.wrap(rows) if flatten: rows = (_flatten(row) for row in rows) db[file_table].insert_all(rows, alter=True) if tracker is not None: db[file_table].transform(types=tracker.types) # Add convenient t / t1 / t2 views view_names = ["t{}".format(i + 1)] if i == 0: view_names.append("t") for view_name in view_names: if not db[view_name].exists(): db.create_view(view_name, "select * from [{}]".format(file_table)) if fp: fp.close() if analyze: _analyze(db, tables=None, columns=None, save=False) return if dump: for line in db.iterdump(): click.echo(line) return if schema: click.echo(db.schema) return if save: db2 = sqlite_utils.Database(save) for line in db.iterdump(): db2.execute(line) return for alias, attach_path in attach: db.attach(alias, attach_path) _load_extensions(db, load_extension) db.register_fts4_bm25() if functions: _register_functions(db, functions) _execute_query( db, sql, param, raw, raw_lines, table, csv, tsv, no_headers, fmt, nl, arrays, json_cols, ) def _execute_query( db, sql, param, raw, raw_lines, table, csv, tsv, no_headers, fmt, nl, arrays, json_cols, ): with db.conn: try: cursor = db.execute(sql, dict(param)) except OperationalError as e: raise click.ClickException(str(e)) if cursor.description is None: # This was an update/insert headers = ["rows_affected"] cursor = [[cursor.rowcount]] else: headers = [c[0] for c in cursor.description] if raw: data = cursor.fetchone()[0] if isinstance(data, bytes): sys.stdout.buffer.write(data) else: sys.stdout.write(str(data)) elif raw_lines: for row in cursor: data = row[0] if isinstance(data, bytes): 
sys.stdout.buffer.write(data + b"\n") else: sys.stdout.write(str(data) + "\n") elif fmt or table: print( tabulate.tabulate( list(cursor), headers=headers, tablefmt=fmt or "simple" ) ) elif csv or tsv: writer = csv_std.writer(sys.stdout, dialect="excel-tab" if tsv else "excel") if not no_headers: writer.writerow(headers) for row in cursor: writer.writerow(row) else: for line in output_rows(cursor, headers, nl, arrays, json_cols): click.echo(line) @cli.command() @click.argument( "path", type=click.Path(file_okay=True, dir_okay=False, allow_dash=False), required=True, ) @click.argument("dbtable") @click.argument("q") @click.option("-o", "--order", type=str, help="Order by ('column' or 'column desc')") @click.option("-c", "--column", type=str, multiple=True, help="Columns to return") @click.option( "--limit", type=int, help="Number of rows to return - defaults to everything", ) @click.option( "--sql", "show_sql", is_flag=True, help="Show SQL query that would be run" ) @click.option("--quote", is_flag=True, help="Apply FTS quoting rules to search term") @output_options @load_extension_option @click.pass_context def search( ctx, path, dbtable, q, order, show_sql, quote, column, limit, nl, arrays, csv, tsv, no_headers, table, fmt, json_cols, load_extension, ): """Execute a full-text search against this table Example: sqlite-utils search data.db chickens lila """ db = sqlite_utils.Database(path) _load_extensions(db, load_extension) # Check table exists table_obj = db[dbtable] if not table_obj.exists(): raise click.ClickException("Table '{}' does not exist".format(dbtable)) if not table_obj.detect_fts(): raise click.ClickException( "Table '{}' is not configured for full-text search".format(dbtable) ) if column: # Check they all exist table_columns = table_obj.columns_dict for c in column: if c not in table_columns: raise click.ClickException( "Table '{}' has no column '{}".format(dbtable, c) ) sql = table_obj.search_sql(columns=column, order_by=order, limit=limit) if show_sql: click.echo(sql) return if quote: q = db.quote_fts(q) try: ctx.invoke( query, path=path, sql=sql, nl=nl, arrays=arrays, csv=csv, tsv=tsv, no_headers=no_headers, table=table, fmt=fmt, json_cols=json_cols, param=[("query", q)], load_extension=load_extension, ) except click.ClickException as e: if "malformed MATCH expression" in str(e) or "unterminated string" in str(e): raise click.ClickException( "{}\n\nTry running this again with the --quote option".format(str(e)) ) else: raise @cli.command() @click.argument( "path", type=click.Path(file_okay=True, dir_okay=False, allow_dash=False), required=True, ) @click.argument("dbtable") @click.option("-c", "--column", type=str, multiple=True, help="Columns to return") @click.option("--where", help="Optional where clause") @click.option("-o", "--order", type=str, help="Order by ('column' or 'column desc')") @click.option( "-p", "--param", multiple=True, type=(str, str), help="Named :parameters for where clause", ) @click.option( "--limit", type=int, help="Number of rows to return - defaults to everything", ) @click.option( "--offset", type=int, help="SQL offset to use", ) @output_options @load_extension_option @click.pass_context def rows( ctx, path, dbtable, column, where, order, param, limit, offset, nl, arrays, csv, tsv, no_headers, table, fmt, json_cols, load_extension, ): """Output all rows in the specified table Example: \b sqlite-utils rows trees.db Trees """ columns = "*" if column: columns = ", ".join("[{}]".format(c) for c in column) sql = "select {} from [{}]".format(columns, 
dbtable) if where: sql += " where " + where if order: sql += " order by " + order if limit: sql += " limit {}".format(limit) if offset: sql += " offset {}".format(offset) ctx.invoke( query, path=path, sql=sql, nl=nl, arrays=arrays, csv=csv, tsv=tsv, no_headers=no_headers, table=table, fmt=fmt, param=param, json_cols=json_cols, load_extension=load_extension, ) @cli.command() @click.argument( "path", type=click.Path(file_okay=True, dir_okay=False, allow_dash=False), required=True, ) @click.argument("tables", nargs=-1) @output_options @load_extension_option @click.pass_context def triggers( ctx, path, tables, nl, arrays, csv, tsv, no_headers, table, fmt, json_cols, load_extension, ): """Show triggers configured in this database Example: \b sqlite-utils triggers trees.db """ sql = "select name, tbl_name as [table], sql from sqlite_master where type = 'trigger'" if tables: quote = sqlite_utils.Database(memory=True).quote sql += " and [table] in ({})".format( ", ".join(quote(table) for table in tables) ) ctx.invoke( query, path=path, sql=sql, nl=nl, arrays=arrays, csv=csv, tsv=tsv, no_headers=no_headers, table=table, fmt=fmt, json_cols=json_cols, load_extension=load_extension, ) @cli.command() @click.argument( "path", type=click.Path(file_okay=True, dir_okay=False, allow_dash=False), required=True, ) @click.argument("tables", nargs=-1) @click.option("--aux", is_flag=True, help="Include auxiliary columns") @output_options @load_extension_option @click.pass_context def indexes( ctx, path, tables, aux, nl, arrays, csv, tsv, no_headers, table, fmt, json_cols, load_extension, ): """Show indexes for the whole database or specific tables Example: \b sqlite-utils indexes trees.db Trees """ sql = """ select sqlite_master.name as "table", indexes.name as index_name, xinfo.* from sqlite_master join pragma_index_list(sqlite_master.name) indexes join pragma_index_xinfo(index_name) xinfo where sqlite_master.type = 'table' """ if tables: quote = sqlite_utils.Database(memory=True).quote sql += " and sqlite_master.name in ({})".format( ", ".join(quote(table) for table in tables) ) if not aux: sql += " and xinfo.key = 1" ctx.invoke( query, path=path, sql=sql, nl=nl, arrays=arrays, csv=csv, tsv=tsv, no_headers=no_headers, table=table, fmt=fmt, json_cols=json_cols, load_extension=load_extension, ) @cli.command() @click.argument( "path", type=click.Path(file_okay=True, dir_okay=False, allow_dash=False), required=True, ) @click.argument("tables", nargs=-1, required=False) @load_extension_option def schema( path, tables, load_extension, ): """Show full schema for this database or for specified tables Example: \b sqlite-utils schema trees.db """ db = sqlite_utils.Database(path) _load_extensions(db, load_extension) if tables: for table in tables: click.echo(db[table].schema) else: click.echo(db.schema) @cli.command() @click.argument( "path", type=click.Path(file_okay=True, dir_okay=False, allow_dash=False), required=True, ) @click.argument("table") @click.option( "--type", type=( str, click.Choice(["INTEGER", "TEXT", "FLOAT", "BLOB"], case_sensitive=False), ), multiple=True, help="Change column type to INTEGER, TEXT, FLOAT or BLOB", ) @click.option("--drop", type=str, multiple=True, help="Drop this column") @click.option( "--rename", type=(str, str), multiple=True, help="Rename this column to X" ) @click.option("-o", "--column-order", type=str, multiple=True, help="Reorder columns") @click.option("--not-null", type=str, multiple=True, help="Set this column to NOT NULL") @click.option( "--not-null-false", type=str, 
multiple=True, help="Remove NOT NULL from this column" ) @click.option("--pk", type=str, multiple=True, help="Make this column the primary key") @click.option( "--pk-none", is_flag=True, help="Remove primary key (convert to rowid table)" ) @click.option( "--default", type=(str, str), multiple=True, help="Set default value for this column", ) @click.option( "--default-none", type=str, multiple=True, help="Remove default from this column" ) @click.option( "add_foreign_keys", "--add-foreign-key", type=(str, str, str), multiple=True, help="Add a foreign key constraint from a column to another table with another column", ) @click.option( "drop_foreign_keys", "--drop-foreign-key", type=str, multiple=True, help="Drop foreign key constraint for this column", ) @click.option("--sql", is_flag=True, help="Output SQL without executing it") @load_extension_option def transform( path, table, type, drop, rename, column_order, not_null, not_null_false, pk, pk_none, default, default_none, add_foreign_keys, drop_foreign_keys, sql, load_extension, ): """Transform a table beyond the capabilities of ALTER TABLE Example: \b sqlite-utils transform mydb.db mytable \\ --drop column1 \\ --rename column2 column_renamed """ db = sqlite_utils.Database(path) _load_extensions(db, load_extension) types = {} kwargs = {} for column, ctype in type: if ctype.upper() not in VALID_COLUMN_TYPES: raise click.ClickException( "column types must be one of {}".format(VALID_COLUMN_TYPES) ) types[column] = ctype.upper() not_null_dict = {} for column in not_null: not_null_dict[column] = True for column in not_null_false: not_null_dict[column] = False default_dict = {} for column, value in default: default_dict[column] = value for column in default_none: default_dict[column] = None kwargs["types"] = types kwargs["drop"] = set(drop) kwargs["rename"] = dict(rename) kwargs["column_order"] = column_order or None kwargs["not_null"] = not_null_dict if pk: if len(pk) == 1: kwargs["pk"] = pk[0] else: kwargs["pk"] = pk elif pk_none: kwargs["pk"] = None kwargs["defaults"] = default_dict if drop_foreign_keys: kwargs["drop_foreign_keys"] = drop_foreign_keys if add_foreign_keys: kwargs["add_foreign_keys"] = add_foreign_keys if sql: for line in db[table].transform_sql(**kwargs): click.echo(line) else: db[table].transform(**kwargs) @cli.command() @click.argument( "path", type=click.Path(file_okay=True, dir_okay=False, allow_dash=False), required=True, ) @click.argument("table") @click.argument("columns", nargs=-1, required=True) @click.option( "--table", "other_table", help="Name of the other table to extract columns to" ) @click.option("--fk-column", help="Name of the foreign key column to add to the table") @click.option( "--rename", type=(str, str), multiple=True, help="Rename this column in extracted table", ) @load_extension_option def extract( path, table, columns, other_table, fk_column, rename, load_extension, ): """Extract one or more columns into a separate table Example: \b sqlite-utils extract trees.db Street_Trees species """ db = sqlite_utils.Database(path) _load_extensions(db, load_extension) kwargs = dict( columns=columns, table=other_table, fk_column=fk_column, rename=dict(rename), ) db[table].extract(**kwargs) @cli.command(name="insert-files") @click.argument( "path", type=click.Path(file_okay=True, dir_okay=False, allow_dash=False), required=True, ) @click.argument("table") @click.argument( "file_or_dir", nargs=-1, required=True, type=click.Path(file_okay=True, dir_okay=True, allow_dash=True), ) @click.option( "-c", "--column", 
type=str, multiple=True, help="Column definitions for the table", ) @click.option("--pk", type=str, help="Column to use as primary key") @click.option("--alter", is_flag=True, help="Alter table to add missing columns") @click.option("--replace", is_flag=True, help="Replace files with matching primary key") @click.option("--upsert", is_flag=True, help="Upsert files with matching primary key") @click.option("--name", type=str, help="File name to use") @click.option("--text", is_flag=True, help="Store file content as TEXT, not BLOB") @click.option( "--encoding", help="Character encoding for input, defaults to utf-8", ) @click.option("-s", "--silent", is_flag=True, help="Don't show a progress bar") @load_extension_option def insert_files( path, table, file_or_dir, column, pk, alter, replace, upsert, name, text, encoding, silent, load_extension, ): """ Insert one or more files using BLOB columns in the specified table Example: \b sqlite-utils insert-files pics.db images *.gif \\ -c name:name \\ -c content:content \\ -c content_hash:sha256 \\ -c created:ctime_iso \\ -c modified:mtime_iso \\ -c size:size \\ --pk name """ if not column: if text: column = ["path:path", "content_text:content_text", "size:size"] else: column = ["path:path", "content:content", "size:size"] if not pk: pk = "path" def yield_paths_and_relative_paths(): for f_or_d in file_or_dir: path = pathlib.Path(f_or_d) if f_or_d == "-": yield "-", "-" elif path.is_dir(): for subpath in path.rglob("*"): if subpath.is_file(): yield subpath, subpath.relative_to(path) elif path.is_file(): yield path, path # Load all paths so we can show a progress bar paths_and_relative_paths = list(yield_paths_and_relative_paths()) with progressbar(paths_and_relative_paths, silent=silent) as bar: def to_insert(): for path, relative_path in bar: row = {} # content_text is special case as it considers 'encoding' def _content_text(p): resolved = p.resolve() try: return resolved.read_text(encoding=encoding) except UnicodeDecodeError as e: raise UnicodeDecodeErrorForPath(e, resolved) lookups = dict(FILE_COLUMNS, content_text=_content_text) if path == "-": stdin_data = sys.stdin.buffer.read() # We only support a subset of columns for this case lookups = { "name": lambda p: name or "-", "path": lambda p: name or "-", "content": lambda p: stdin_data, "content_text": lambda p: stdin_data.decode( encoding or "utf-8" ), "sha256": lambda p: hashlib.sha256(stdin_data).hexdigest(), "md5": lambda p: hashlib.md5(stdin_data).hexdigest(), "size": lambda p: len(stdin_data), } for coldef in column: if ":" in coldef: colname, coltype = coldef.rsplit(":", 1) else: colname, coltype = coldef, coldef try: value = lookups[coltype](path) row[colname] = value except KeyError: raise click.ClickException( "'{}' is not a valid column definition - options are {}".format( coltype, ", ".join(lookups.keys()) ) ) # Special case for --name if coltype == "name" and name: row[colname] = name yield row db = sqlite_utils.Database(path) _load_extensions(db, load_extension) try: with db.conn: db[table].insert_all( to_insert(), pk=pk, alter=alter, replace=replace, upsert=upsert ) except UnicodeDecodeErrorForPath as e: raise click.ClickException( UNICODE_ERROR.format( "Could not read file '{}' as text\n\n{}".format(e.path, e.exception) ) ) @cli.command(name="analyze-tables") @click.argument( "path", type=click.Path(file_okay=True, dir_okay=False, allow_dash=False, exists=True), required=True, ) @click.argument("tables", nargs=-1) @click.option( "-c", "--column", "columns", type=str, multiple=True, 
help="Specific columns to analyze", ) @click.option("--save", is_flag=True, help="Save results to _analyze_tables table") @click.option("--common-limit", type=int, default=10, help="How many common values") @click.option("--no-most", is_flag=True, default=False, help="Skip most common values") @click.option( "--no-least", is_flag=True, default=False, help="Skip least common values" ) @load_extension_option def analyze_tables( path, tables, columns, save, common_limit, no_most, no_least, load_extension, ): """Analyze the columns in one or more tables Example: \b sqlite-utils analyze-tables data.db trees """ db = sqlite_utils.Database(path) _load_extensions(db, load_extension) _analyze(db, tables, columns, save, common_limit, no_most, no_least) def _analyze(db, tables, columns, save, common_limit=10, no_most=False, no_least=False): if not tables: tables = db.table_names() todo = [] table_counts = {} seen_columns = set() for table in tables: table_counts[table] = db[table].count for column in db[table].columns: if not columns or column.name in columns: todo.append((table, column.name)) seen_columns.add(column.name) # Check the user didn't specify a column that doesn't exist if columns and (set(columns) - seen_columns): raise click.ClickException( "These columns were not found: {}".format( ", ".join(sorted(set(columns) - seen_columns)) ) ) # Now we now how many we need to do for i, (table, column) in enumerate(todo): column_details = db[table].analyze_column( column, common_limit=common_limit, total_rows=table_counts[table], value_truncate=80, most_common=not no_most, least_common=not no_least, ) if save: db["_analyze_tables_"].insert( column_details._asdict(), pk=("table", "column"), replace=True ) most_common_rendered = "" if column_details.num_null != column_details.total_rows: most_common_rendered = _render_common( "\n\n Most common:", column_details.most_common ) least_common_rendered = _render_common( "\n\n Least common:", column_details.least_common ) details = ( ( textwrap.dedent( """ {table}.{column}: ({i}/{total}) Total rows: {total_rows} Null rows: {num_null} Blank rows: {num_blank} Distinct values: {num_distinct}{most_common_rendered}{least_common_rendered} """ ) .strip() .format( i=i + 1, total=len(todo), most_common_rendered=most_common_rendered, least_common_rendered=least_common_rendered, **column_details._asdict(), ) ) + "\n" ) click.echo(details) @cli.command() @click.argument("packages", nargs=-1, required=False) @click.option( "-U", "--upgrade", is_flag=True, help="Upgrade packages to latest version" ) @click.option( "-e", "--editable", help="Install a project in editable mode from this path", ) def install(packages, upgrade, editable): """Install packages from PyPI into the same environment as sqlite-utils""" args = ["pip", "install"] if upgrade: args += ["--upgrade"] if editable: args += ["--editable", editable] args += list(packages) sys.argv = args run_module("pip", run_name="__main__") @cli.command() @click.argument("packages", nargs=-1, required=True) @click.option("-y", "--yes", is_flag=True, help="Don't ask for confirmation") def uninstall(packages, yes): """Uninstall Python packages from the sqlite-utils environment""" sys.argv = ["pip", "uninstall"] + list(packages) + (["-y"] if yes else []) run_module("pip", run_name="__main__") def _generate_convert_help(): help = textwrap.dedent( """ Convert columns using Python code you supply. 
For example: \b sqlite-utils convert my.db mytable mycolumn \\ '"\\n".join(textwrap.wrap(value, 10))' \\ --import=textwrap "value" is a variable with the column value to be converted. Use "-" for CODE to read Python code from standard input. The following common operations are available as recipe functions: """ ).strip() recipe_names = [ n for n in dir(recipes) if not n.startswith("_") and n not in ("json", "parser") and callable(getattr(recipes, n)) ] for name in recipe_names: fn = getattr(recipes, name) help += "\n\nr.{}{}\n\n\b{}".format( name, str(inspect.signature(fn)), fn.__doc__.rstrip() ) help += "\n\n" help += textwrap.dedent( """ You can use these recipes like so: \b sqlite-utils convert my.db mytable mycolumn \\ 'r.jsonsplit(value, delimiter=":")' """ ).strip() return help @cli.command(help=_generate_convert_help()) @click.argument( "db_path", type=click.Path(file_okay=True, dir_okay=False, allow_dash=False), required=True, ) @click.argument("table", type=str) @click.argument("columns", type=str, nargs=-1, required=True) @click.argument("code", type=str) @click.option( "--import", "imports", type=str, multiple=True, help="Python modules to import" ) @click.option( "--dry-run", is_flag=True, help="Show results of running this against first 10 rows" ) @click.option( "--multi", is_flag=True, help="Populate columns for keys in returned dictionary" ) @click.option("--where", help="Optional where clause") @click.option( "-p", "--param", multiple=True, type=(str, str), help="Named :parameters for where clause", ) @click.option("--output", help="Optional separate column to populate with the output") @click.option( "--output-type", help="Column type to use for the output column", default="text", type=click.Choice(["integer", "float", "blob", "text"]), ) @click.option("--drop", is_flag=True, help="Drop original column afterwards") @click.option("--no-skip-false", is_flag=True, help="Don't skip falsey values") @click.option("-s", "--silent", is_flag=True, help="Don't show a progress bar") @click.option("pdb_", "--pdb", is_flag=True, help="Open pdb debugger on first error") def convert( db_path, table, columns, code, imports, dry_run, multi, where, param, output, output_type, drop, no_skip_false, silent, pdb_, ): sqlite3.enable_callback_tracebacks(True) db = sqlite_utils.Database(db_path) if output is not None and len(columns) > 1: raise click.ClickException("Cannot use --output with more than one column") if multi and len(columns) > 1: raise click.ClickException("Cannot use --multi with more than one column") if drop and not (output or multi): raise click.ClickException("--drop can only be used with --output or --multi") if code == "-": # Read code from standard input code = sys.stdin.read() where_args = dict(param) if param else [] # Compile the code into a function body called fn(value) try: fn = _compile_code(code, imports) except SyntaxError as e: raise click.ClickException(str(e)) if dry_run: # Pull first 20 values for first column and preview them if multi: def preview(v): return json.dumps(fn(v), default=repr) if v else v else: def preview(v): return fn(v) if v else v db.conn.create_function("preview_transform", 1, preview) sql = """ select [{column}] as value, preview_transform([{column}]) as preview from [{table}]{where} limit 10 """.format( column=columns[0], table=table, where=" where {}".format(where) if where is not None else "", ) for row in db.conn.execute(sql, where_args).fetchall(): click.echo(str(row[0])) click.echo(" --- becomes:") click.echo(str(row[1])) click.echo() 
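        # After previewing the first ten transformed values, report how many rows
        # the real conversion would touch - nothing is written in dry-run mode.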
count = db[table].count_where( where=where, where_args=where_args, ) click.echo("Would affect {} row{}".format(count, "" if count == 1 else "s")) else: # Wrap fn with a thing that will catch errors and optionally drop into pdb if pdb_: fn_ = fn def wrapped_fn(value): try: return fn_(value) except Exception as ex: print("\nException raised, dropping into pdb...:", ex) pdb.post_mortem(ex.__traceback__) sys.exit(1) fn = wrapped_fn try: db[table].convert( columns, fn, where=where, where_args=where_args, output=output, output_type=output_type, drop=drop, skip_false=not no_skip_false, multi=multi, show_progress=not silent, ) except BadMultiValues as e: raise click.ClickException( "When using --multi code must return a Python dictionary - returned: {}".format( repr(e.values) ) ) @cli.command("add-geometry-column") @click.argument( "db_path", type=click.Path(file_okay=True, dir_okay=False, allow_dash=False), required=True, ) @click.argument("table", type=str) @click.argument("column_name", type=str) @click.option( "-t", "--type", "geometry_type", type=click.Choice( [ "POINT", "LINESTRING", "POLYGON", "MULTIPOINT", "MULTILINESTRING", "MULTIPOLYGON", "GEOMETRYCOLLECTION", "GEOMETRY", ], case_sensitive=False, ), default="GEOMETRY", help="Specify a geometry type for this column.", show_default=True, ) @click.option( "--srid", type=int, default=4326, show_default=True, help="Spatial Reference ID. See https://spatialreference.org for details on specific projections.", ) @click.option( "--dimensions", "coord_dimension", type=str, default="XY", help="Coordinate dimensions. Use XYZ for three-dimensional geometries.", ) @click.option("--not-null", "not_null", is_flag=True, help="Add a NOT NULL constraint.") @load_extension_option def add_geometry_column( db_path, table, column_name, geometry_type, srid, coord_dimension, not_null, load_extension, ): """Add a SpatiaLite geometry column to an existing table. Requires SpatiaLite extension. \n\n By default, this command will try to load the SpatiaLite extension from usual paths. To load it from a specific path, use --load-extension.""" db = sqlite_utils.Database(db_path) if not db[table].exists(): raise click.ClickException( "You must create a table before adding a geometry column" ) # load spatialite, one way or another if load_extension: _load_extensions(db, load_extension) db.init_spatialite() if db[table].add_geometry_column( column_name, geometry_type, srid, coord_dimension, not_null ): click.echo(f"Added {geometry_type} column {column_name} to {table}") @cli.command("create-spatial-index") @click.argument( "db_path", type=click.Path(file_okay=True, dir_okay=False, allow_dash=False), required=True, ) @click.argument("table", type=str) @click.argument("column_name", type=str) @load_extension_option def create_spatial_index(db_path, table, column_name, load_extension): """Create a spatial index on a SpatiaLite geometry column. The table and geometry column must already exist before trying to add a spatial index. \n\n By default, this command will try to load the SpatiaLite extension from usual paths. 
To load it from a specific path, use --load-extension.""" db = sqlite_utils.Database(db_path) if not db[table].exists(): raise click.ClickException( "You must create a table and add a geometry column before creating a spatial index" ) # load spatialite if load_extension: _load_extensions(db, load_extension) db.init_spatialite() if column_name not in db[table].columns_dict: raise click.ClickException( "You must add a geometry column before creating a spatial index" ) db[table].create_spatial_index(column_name) @cli.command(name="plugins") def plugins_list(): "List installed plugins" click.echo(json.dumps(get_plugins(), indent=2)) pm.hook.register_commands(cli=cli) def _render_common(title, values): if values is None: return "" lines = [title] for value, count in values: lines.append(" {}: {}".format(count, value)) return "\n".join(lines) class UnicodeDecodeErrorForPath(Exception): def __init__(self, exception, path): self.exception = exception self.path = path FILE_COLUMNS = { "name": lambda p: p.name, "path": lambda p: str(p), "fullpath": lambda p: str(p.resolve()), "sha256": lambda p: hashlib.sha256(p.resolve().read_bytes()).hexdigest(), "md5": lambda p: hashlib.md5(p.resolve().read_bytes()).hexdigest(), "mode": lambda p: p.stat().st_mode, "content": lambda p: p.resolve().read_bytes(), "mtime": lambda p: p.stat().st_mtime, "ctime": lambda p: p.stat().st_ctime, "mtime_int": lambda p: int(p.stat().st_mtime), "ctime_int": lambda p: int(p.stat().st_ctime), "mtime_iso": lambda p: datetime.utcfromtimestamp(p.stat().st_mtime).isoformat(), "ctime_iso": lambda p: datetime.utcfromtimestamp(p.stat().st_ctime).isoformat(), "size": lambda p: p.stat().st_size, "stem": lambda p: p.stem, "suffix": lambda p: p.suffix, } def output_rows(iterator, headers, nl, arrays, json_cols): # We have to iterate two-at-a-time so we can know if we # should output a trailing comma or if we have reached # the last row. 
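    # itertools.tee() gives two independent iterators over the same rows;
    # advancing the second by one item lets zip_longest() pair every row with
    # its successor, so next_row is None exactly on the final row.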
current_iter, next_iter = itertools.tee(iterator, 2) next(next_iter, None) first = True for row, next_row in itertools.zip_longest(current_iter, next_iter): is_last = next_row is None data = row if json_cols: # Any value that is a valid JSON string should be treated as JSON data = [maybe_json(value) for value in data] if not arrays: data = dict(zip(headers, data)) line = "{firstchar}{serialized}{maybecomma}{lastchar}".format( firstchar=("[" if first else " ") if not nl else "", serialized=json.dumps(data, default=json_binary), maybecomma="," if (not nl and not is_last) else "", lastchar="]" if (is_last and not nl) else "", ) yield line first = False if first: # We didn't output any rows, so yield the empty list yield "[]" def maybe_json(value): if not isinstance(value, str): return value stripped = value.strip() if not (stripped.startswith("{") or stripped.startswith("[")): return value try: return json.loads(stripped) except ValueError: return value def json_binary(value): if isinstance(value, bytes): return {"$base64": True, "encoded": base64.b64encode(value).decode("latin-1")} else: raise TypeError def verify_is_dict(doc): if not isinstance(doc, dict): raise click.ClickException( "Rows must all be dictionaries, got: {}".format(repr(doc)[:1000]) ) return doc def _load_extensions(db, load_extension): if load_extension: db.conn.enable_load_extension(True) for ext in load_extension: if ext == "spatialite" and not os.path.exists(ext): ext = find_spatialite() if ":" in ext: path, _, entrypoint = ext.partition(":") db.conn.execute("SELECT load_extension(?, ?)", [path, entrypoint]) else: db.conn.load_extension(ext) def _register_functions(db, functions): # Register any Python functions as SQL functions: sqlite3.enable_callback_tracebacks(True) globals = {} try: exec(functions, globals) except SyntaxError as ex: raise click.ClickException("Error in functions definition: {}".format(ex)) # Register all callables in the locals dict: for name, value in globals.items(): if callable(value) and not name.startswith("_"): db.register_function(value, name=name) sqlite-utils-3.35.2/sqlite_utils/db.py000066400000000000000000004326321452131415600177370ustar00rootroot00000000000000from .utils import ( chunks, hash_record, sqlite3, OperationalError, suggest_column_types, types_for_column_types, column_affinity, progressbar, find_spatialite, ) import binascii from collections import namedtuple from collections.abc import Mapping import contextlib import datetime import decimal import inspect import itertools import json import os import pathlib import re import secrets from sqlite_fts4 import rank_bm25 # type: ignore import textwrap from typing import ( cast, Any, Callable, Dict, Generator, Iterable, Union, Optional, List, Tuple, ) import uuid from sqlite_utils.plugins import pm try: from sqlite_dump import iterdump except ImportError: iterdump = None SQLITE_MAX_VARS = 999 _quote_fts_re = re.compile(r'\s+|(".*?")') _virtual_table_using_re = re.compile( r""" ^ # Start of string \s*CREATE\s+VIRTUAL\s+TABLE\s+ # CREATE VIRTUAL TABLE ( '(?P[^']*(?:''[^']*)*)' | # single quoted name "(?P[^"]*(?:""[^"]*)*)" | # double quoted name `(?P[^`]+)` | # `backtick` quoted name \[(?P[^\]]+)\] | # [...] quoted name (?P # SQLite non-quoted identifier [A-Za-z_\u0080-\uffff] # \u0080-\uffff = "any character larger than u007f" [A-Za-z_\u0080-\uffff0-9\$]* # zero-or-more alphanemuric or $ ) ) \s+(IF\s+NOT\s+EXISTS\s+)? 
# IF NOT EXISTS (optional) USING\s+(?P\w+) # for example USING FTS5 """, re.VERBOSE | re.IGNORECASE, ) try: import pandas as pd # type: ignore except ImportError: pd = None # type: ignore try: import numpy as np # type: ignore except ImportError: np = None # type: ignore Column = namedtuple( "Column", ("cid", "name", "type", "notnull", "default_value", "is_pk") ) Column.__doc__ = """ Describes a SQLite column returned by the :attr:`.Table.columns` property. ``cid`` Column index ``name`` Column name ``type`` Column type ``notnull`` Does the column have a ``not null`` constraint ``default_value`` Default value for this column ``is_pk`` Is this column part of the primary key """ ColumnDetails = namedtuple( "ColumnDetails", ( "table", "column", "total_rows", "num_null", "num_blank", "num_distinct", "most_common", "least_common", ), ) ColumnDetails.__doc__ = """ Summary information about a column, see :ref:`python_api_analyze_column`. ``table`` The name of the table ``column`` The name of the column ``total_rows`` The total number of rows in the table ``num_null`` The number of rows for which this column is null ``num_blank`` The number of rows for which this column is blank (the empty string) ``num_distinct`` The number of distinct values in this column ``most_common`` The ``N`` most common values as a list of ``(value, count)`` tuples, or ``None`` if the table consists entirely of distinct values ``least_common`` The ``N`` least common values as a list of ``(value, count)`` tuples, or ``None`` if the table is entirely distinct or if the number of distinct values is less than N (since they will already have been returned in ``most_common``) """ ForeignKey = namedtuple( "ForeignKey", ("table", "column", "other_table", "other_column") ) Index = namedtuple("Index", ("seq", "name", "unique", "origin", "partial", "columns")) XIndex = namedtuple("XIndex", ("name", "columns")) XIndexColumn = namedtuple( "XIndexColumn", ("seqno", "cid", "name", "desc", "coll", "key") ) Trigger = namedtuple("Trigger", ("name", "table", "sql")) ForeignKeyIndicator = Union[ str, ForeignKey, Tuple[str, str], Tuple[str, str, str], Tuple[str, str, str, str], ] ForeignKeysType = Union[Iterable[ForeignKeyIndicator], List[ForeignKeyIndicator]] class Default: pass DEFAULT = Default() COLUMN_TYPE_MAPPING = { float: "FLOAT", int: "INTEGER", bool: "INTEGER", str: "TEXT", dict: "TEXT", tuple: "TEXT", list: "TEXT", bytes.__class__: "BLOB", bytes: "BLOB", memoryview: "BLOB", datetime.datetime: "TEXT", datetime.date: "TEXT", datetime.time: "TEXT", datetime.timedelta: "TEXT", decimal.Decimal: "FLOAT", None.__class__: "TEXT", uuid.UUID: "TEXT", # SQLite explicit types "TEXT": "TEXT", "INTEGER": "INTEGER", "FLOAT": "FLOAT", "BLOB": "BLOB", "text": "TEXT", "integer": "INTEGER", "float": "FLOAT", "blob": "BLOB", } # If numpy is available, add more types if np: COLUMN_TYPE_MAPPING.update( { np.int8: "INTEGER", np.int16: "INTEGER", np.int32: "INTEGER", np.int64: "INTEGER", np.uint8: "INTEGER", np.uint16: "INTEGER", np.uint32: "INTEGER", np.uint64: "INTEGER", np.float16: "FLOAT", np.float32: "FLOAT", np.float64: "FLOAT", } ) # If pandas is available, add more types if pd: COLUMN_TYPE_MAPPING.update({pd.Timestamp: "TEXT"}) # type: ignore class AlterError(Exception): "Error altering table" pass class NoObviousTable(Exception): "Could not tell which table this operation refers to" pass class NoTable(Exception): "Specified table does not exist" pass class BadPrimaryKey(Exception): "Table does not have a single obvious primary key" pass class 
NotFoundError(Exception): "Record not found" pass class PrimaryKeyRequired(Exception): "Primary key needs to be specified" pass class InvalidColumns(Exception): "Specified columns do not exist" pass class DescIndex(str): pass class BadMultiValues(Exception): "With multi=True code must return a Python dictionary" def __init__(self, values): self.values = values _COUNTS_TABLE_CREATE_SQL = """ CREATE TABLE IF NOT EXISTS [{}]( [table] TEXT PRIMARY KEY, count INTEGER DEFAULT 0 ); """.strip() class Database: """ Wrapper for a SQLite database connection that adds a variety of useful utility methods. To create an instance:: # create data.db file, or open existing: db = Database("data.db") # Create an in-memory database: dB = Database(memory=True) :param filename_or_conn: String path to a file, or a ``pathlib.Path`` object, or a ``sqlite3`` connection :param memory: set to ``True`` to create an in-memory database :param memory_name: creates a named in-memory database that can be shared across multiple connections :param recreate: set to ``True`` to delete and recreate a file database (**dangerous**) :param recursive_triggers: defaults to ``True``, which sets ``PRAGMA recursive_triggers=on;`` - set to ``False`` to avoid setting this pragma :param tracer: set a tracer function (``print`` works for this) which will be called with ``sql, parameters`` every time a SQL query is executed :param use_counts_table: set to ``True`` to use a cached counts table, if available. See :ref:`python_api_cached_table_counts` """ _counts_table_name = "_counts" use_counts_table = False def __init__( self, filename_or_conn: Optional[Union[str, pathlib.Path, sqlite3.Connection]] = None, memory: bool = False, memory_name: Optional[str] = None, recreate: bool = False, recursive_triggers: bool = True, tracer: Optional[Callable] = None, use_counts_table: bool = False, execute_plugins: bool = True, ): assert (filename_or_conn is not None and (not memory and not memory_name)) or ( filename_or_conn is None and (memory or memory_name) ), "Either specify a filename_or_conn or pass memory=True" if memory_name: uri = "file:{}?mode=memory&cache=shared".format(memory_name) self.conn = sqlite3.connect( uri, uri=True, check_same_thread=False, ) elif memory or filename_or_conn == ":memory:": self.conn = sqlite3.connect(":memory:") elif isinstance(filename_or_conn, (str, pathlib.Path)): if recreate and os.path.exists(filename_or_conn): try: os.remove(filename_or_conn) except OSError: # Avoid mypy and __repr__ errors, see: # https://github.com/simonw/sqlite-utils/issues/503 self.conn = sqlite3.connect(":memory:") raise self.conn = sqlite3.connect(str(filename_or_conn)) else: assert not recreate, "recreate cannot be used with connections, only paths" self.conn = filename_or_conn self._tracer = tracer if recursive_triggers: self.execute("PRAGMA recursive_triggers=on;") self._registered_functions: set = set() self.use_counts_table = use_counts_table if execute_plugins: pm.hook.prepare_connection(conn=self.conn) def close(self): "Close the SQLite connection, and the underlying database file" self.conn.close() @contextlib.contextmanager def ensure_autocommit_off(self): """ Ensure autocommit is off for this database connection. Example usage:: with db.ensure_autocommit_off(): # do stuff here This will reset to the previous autocommit state at the end of the block. 
""" old_isolation_level = self.conn.isolation_level try: self.conn.isolation_level = None yield finally: self.conn.isolation_level = old_isolation_level @contextlib.contextmanager def tracer(self, tracer: Optional[Callable] = None): """ Context manager to temporarily set a tracer function - all executed SQL queries will be passed to this. The tracer function should accept two arguments: ``sql`` and ``parameters`` Example usage:: with db.tracer(print): db["creatures"].insert({"name": "Cleo"}) See :ref:`python_api_tracing`. :param tracer: Callable accepting ``sql`` and ``parameters`` arguments """ prev_tracer = self._tracer self._tracer = tracer or print try: yield self finally: self._tracer = prev_tracer def __getitem__(self, table_name: str) -> Union["Table", "View"]: """ ``db[table_name]`` returns a :class:`.Table` object for the table with the specified name. If the table does not exist yet it will be created the first time data is inserted into it. :param table_name: The name of the table """ return self.table(table_name) def __repr__(self) -> str: return "".format(self.conn) def register_function( self, fn: Optional[Callable] = None, deterministic: bool = False, replace: bool = False, name: Optional[str] = None, ): """ ``fn`` will be made available as a function within SQL, with the same name and number of arguments. Can be used as a decorator:: @db.register_function def upper(value): return str(value).upper() The decorator can take arguments:: @db.register_function(deterministic=True, replace=True) def upper(value): return str(value).upper() See :ref:`python_api_register_function`. :param fn: Function to register :param deterministic: set ``True`` for functions that always returns the same output for a given input :param replace: set ``True`` to replace an existing function with the same name - otherwise throw an error :param name: name of the SQLite function - if not specified, the Python function name will be used """ def register(fn): fn_name = name or fn.__name__ arity = len(inspect.signature(fn).parameters) if not replace and (fn_name, arity) in self._registered_functions: return fn kwargs = {} registered = False if deterministic: # Try this, but fall back if sqlite3.NotSupportedError try: self.conn.create_function( fn_name, arity, fn, **dict(kwargs, deterministic=True) ) registered = True except (sqlite3.NotSupportedError, TypeError): # TypeError is Python 3.7 "function takes at most 3 arguments" pass if not registered: self.conn.create_function(fn_name, arity, fn, **kwargs) self._registered_functions.add((fn_name, arity)) return fn if fn is None: return register else: register(fn) def register_fts4_bm25(self): "Register the ``rank_bm25(match_info)`` function used for calculating relevance with SQLite FTS4." self.register_function(rank_bm25, deterministic=True, replace=True) def attach(self, alias: str, filepath: Union[str, pathlib.Path]): """ Attach another SQLite database file to this connection with the specified alias, equivalent to:: ATTACH DATABASE 'filepath.db' AS alias :param alias: Alias name to use :param filepath: Path to SQLite database file on disk """ attach_sql = """ ATTACH DATABASE '{}' AS [{}]; """.format( str(pathlib.Path(filepath).resolve()), alias ).strip() self.execute(attach_sql) def query( self, sql: str, params: Optional[Union[Iterable, dict]] = None ) -> Generator[dict, None, None]: """ Execute ``sql`` and return an iterable of dictionaries representing each row. 
:param sql: SQL query to execute :param params: Parameters to use in that query - an iterable for ``where id = ?`` parameters, or a dictionary for ``where id = :id`` """ cursor = self.execute(sql, params or tuple()) keys = [d[0] for d in cursor.description] for row in cursor: yield dict(zip(keys, row)) def execute( self, sql: str, parameters: Optional[Union[Iterable, dict]] = None ) -> sqlite3.Cursor: """ Execute SQL query and return a ``sqlite3.Cursor``. :param sql: SQL query to execute :param parameters: Parameters to use in that query - an iterable for ``where id = ?`` parameters, or a dictionary for ``where id = :id`` """ if self._tracer: self._tracer(sql, parameters) if parameters is not None: return self.conn.execute(sql, parameters) else: return self.conn.execute(sql) def executescript(self, sql: str) -> sqlite3.Cursor: """ Execute multiple SQL statements separated by ; and return the ``sqlite3.Cursor``. :param sql: SQL to execute """ if self._tracer: self._tracer(sql, None) return self.conn.executescript(sql) def table(self, table_name: str, **kwargs) -> Union["Table", "View"]: """ Return a table object, optionally configured with default options. See :ref:`reference_db_table` for option details. :param table_name: Name of the table """ klass = View if table_name in self.view_names() else Table return klass(self, table_name, **kwargs) def quote(self, value: str) -> str: """ Apply SQLite string quoting to a value, including wrapping it in single quotes. :param value: String to quote """ # Normally we would use .execute(sql, [params]) for escaping, but # occasionally that isn't available - most notable when we need # to include a "... DEFAULT 'value'" in a column definition. return self.execute( # Use SQLite itself to correctly escape this string: "SELECT quote(:value)", {"value": value}, ).fetchone()[0] def quote_fts(self, query: str) -> str: """ Escape special characters in a SQLite full-text search query. This works by surrounding each token within the query with double quotes, in order to avoid words like ``NOT`` and ``OR`` having special meaning as defined by the FTS query syntax here: https://www.sqlite.org/fts5.html#full_text_query_syntax If the query has unbalanced ``"`` characters, adds one at end. :param query: String to escape """ if query.count('"') % 2: query += '"' bits = _quote_fts_re.split(query) bits = [b for b in bits if b and b != '""'] return " ".join( '"{}"'.format(bit) if not bit.startswith('"') else bit for bit in bits ) def quote_default_value(self, value: str) -> str: if any( [ str(value).startswith("'") and str(value).endswith("'"), str(value).startswith('"') and str(value).endswith('"'), ] ): return value if str(value).upper() in ("CURRENT_TIME", "CURRENT_DATE", "CURRENT_TIMESTAMP"): return value if str(value).endswith(")"): # Expr return "({})".format(value) return self.quote(value) def table_names(self, fts4: bool = False, fts5: bool = False) -> List[str]: """ List of string table names in this database. :param fts4: Only return tables that are part of FTS4 indexes :param fts5: Only return tables that are part of FTS5 indexes """ where = ["type = 'table'"] if fts4: where.append("sql like '%USING FTS4%'") if fts5: where.append("sql like '%USING FTS5%'") sql = "select name from sqlite_master where {}".format(" AND ".join(where)) return [r[0] for r in self.execute(sql).fetchall()] def view_names(self) -> List[str]: "List of string view names in this database." 
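        # Views, like tables and triggers, live in the sqlite_master catalog -
        # this simply filters that catalog on type = 'view'.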
return [ r[0] for r in self.execute( "select name from sqlite_master where type = 'view'" ).fetchall() ] @property def tables(self) -> List["Table"]: "List of Table objects in this database." return cast(List["Table"], [self[name] for name in self.table_names()]) @property def views(self) -> List["View"]: "List of View objects in this database." return cast(List["View"], [self[name] for name in self.view_names()]) @property def triggers(self) -> List[Trigger]: "List of ``(name, table_name, sql)`` tuples representing triggers in this database." return [ Trigger(*r) for r in self.execute( "select name, tbl_name, sql from sqlite_master where type = 'trigger'" ).fetchall() ] @property def triggers_dict(self) -> Dict[str, str]: "A ``{trigger_name: sql}`` dictionary of triggers in this database." return {trigger.name: trigger.sql for trigger in self.triggers} @property def schema(self) -> str: "SQL schema for this database." sqls = [] for row in self.execute( "select sql from sqlite_master where sql is not null" ).fetchall(): sql = row[0] if not sql.strip().endswith(";"): sql += ";" sqls.append(sql) return "\n".join(sqls) @property def supports_strict(self) -> bool: "Does this database support STRICT mode?" try: table_name = "t{}".format(secrets.token_hex(16)) with self.conn: self.conn.execute( "create table {} (name text) strict".format(table_name) ) self.conn.execute("drop table {}".format(table_name)) return True except Exception: return False @property def sqlite_version(self) -> Tuple[int, ...]: "Version of SQLite, as a tuple of integers for example ``(3, 36, 0)``." row = self.execute("select sqlite_version()").fetchall()[0] return tuple(map(int, row[0].split("."))) @property def journal_mode(self) -> str: """ Current ``journal_mode`` of this database. https://www.sqlite.org/pragma.html#pragma_journal_mode """ return self.execute("PRAGMA journal_mode;").fetchone()[0] def enable_wal(self): """ Sets ``journal_mode`` to ``'wal'`` to enable Write-Ahead Log mode. """ if self.journal_mode != "wal": with self.ensure_autocommit_off(): self.execute("PRAGMA journal_mode=wal;") def disable_wal(self): "Sets ``journal_mode`` back to ``'delete'`` to disable Write-Ahead Log mode." if self.journal_mode != "delete": with self.ensure_autocommit_off(): self.execute("PRAGMA journal_mode=delete;") def _ensure_counts_table(self): with self.conn: self.execute(_COUNTS_TABLE_CREATE_SQL.format(self._counts_table_name)) def enable_counts(self): """ Enable trigger-based count caching for every table in the database, see :ref:`python_api_cached_table_counts`. """ self._ensure_counts_table() for table in self.tables: if ( table.virtual_table_using is None and table.name != self._counts_table_name ): table.enable_counts() self.use_counts_table = True def cached_counts(self, tables: Optional[Iterable[str]] = None) -> Dict[str, int]: """ Return ``{table_name: count}`` dictionary of cached counts for specified tables, or all tables if ``tables`` not provided. :param tables: Subset list of tables to return counts for. """ sql = "select [table], count from {}".format(self._counts_table_name) if tables: sql += " where [table] in ({})".format(", ".join("?" for table in tables)) try: return {r[0]: r[1] for r in self.execute(sql, tables).fetchall()} except OperationalError: return {} def reset_counts(self): "Re-calculate cached counts for tables." 
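        # Rebuild the cache from scratch: empty the counts table, then insert a
        # fresh count(*) for every table that has count-maintaining triggers.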
tables = [table for table in self.tables if table.has_counts_triggers] with self.conn: self._ensure_counts_table() counts_table = self[self._counts_table_name] counts_table.delete_where() counts_table.insert_all( {"table": table.name, "count": table.execute_count()} for table in tables ) def execute_returning_dicts( self, sql: str, params: Optional[Union[Iterable, dict]] = None ) -> List[dict]: return list(self.query(sql, params)) def resolve_foreign_keys( self, name: str, foreign_keys: ForeignKeysType ) -> List[ForeignKey]: """ Given a list of differing foreign_keys definitions, return a list of fully resolved ForeignKey() named tuples. :param name: Name of table that foreign keys are being defined for :param foreign_keys: List of foreign keys, each of which can be a string, a ForeignKey() named tuple, a tuple of (column, other_table), or a tuple of (column, other_table, other_column), or a tuple of (table, column, other_table, other_column) """ table = cast(Table, self[name]) if all(isinstance(fk, ForeignKey) for fk in foreign_keys): return cast(List[ForeignKey], foreign_keys) if all(isinstance(fk, str) for fk in foreign_keys): # It's a list of columns fks = [] for column in foreign_keys: column = cast(str, column) other_table = table.guess_foreign_table(column) other_column = table.guess_foreign_column(other_table) fks.append(ForeignKey(name, column, other_table, other_column)) return fks assert all( isinstance(fk, (tuple, list)) for fk in foreign_keys ), "foreign_keys= should be a list of tuples" fks = [] for tuple_or_list in foreign_keys: if len(tuple_or_list) == 4: assert ( tuple_or_list[0] == name ), "First item in {} should have been {}".format(tuple_or_list, name) assert len(tuple_or_list) in ( 2, 3, 4, ), "foreign_keys= should be a list of tuple pairs or triples" if len(tuple_or_list) in (3, 4): if len(tuple_or_list) == 4: tuple_or_list = cast(Tuple[str, str, str], tuple_or_list[1:]) else: tuple_or_list = cast(Tuple[str, str, str], tuple_or_list) fks.append( ForeignKey( name, tuple_or_list[0], tuple_or_list[1], tuple_or_list[2] ) ) else: # Guess the primary key fks.append( ForeignKey( name, tuple_or_list[0], tuple_or_list[1], table.guess_foreign_column(tuple_or_list[1]), ) ) return fks def create_table_sql( self, name: str, columns: Dict[str, Any], pk: Optional[Any] = None, foreign_keys: Optional[ForeignKeysType] = None, column_order: Optional[List[str]] = None, not_null: Optional[Iterable[str]] = None, defaults: Optional[Dict[str, Any]] = None, hash_id: Optional[str] = None, hash_id_columns: Optional[Iterable[str]] = None, extracts: Optional[Union[Dict[str, str], List[str]]] = None, if_not_exists: bool = False, ) -> str: """ Returns the SQL ``CREATE TABLE`` statement for creating the specified table. 
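
        Example::

            sql = db.create_table_sql("cats", {"id": int, "name": str}, pk="id")
            # Returns the CREATE TABLE statement as a string, without executing it
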
:param name: Name of table :param columns: Dictionary mapping column names to their types, for example ``{"name": str, "age": int}`` :param pk: String name of column to use as a primary key, or a tuple of strings for a compound primary key covering multiple columns :param foreign_keys: List of foreign key definitions for this table :param column_order: List specifying which columns should come first :param not_null: List of columns that should be created as ``NOT NULL`` :param defaults: Dictionary specifying default values for columns :param hash_id: Name of column to be used as a primary key containing a hash of the other columns :param hash_id_columns: List of columns to be used when calculating the hash ID for a row :param extracts: List or dictionary of columns to be extracted during inserts, see :ref:`python_api_extracts` :param if_not_exists: Use ``CREATE TABLE IF NOT EXISTS`` """ if hash_id_columns and (hash_id is None): hash_id = "id" foreign_keys = self.resolve_foreign_keys(name, foreign_keys or []) foreign_keys_by_column = {fk.column: fk for fk in foreign_keys} # any extracts will be treated as integer columns with a foreign key extracts = resolve_extracts(extracts) for extract_column, extract_table in extracts.items(): if isinstance(extract_column, tuple): assert False # Ensure other table exists if not self[extract_table].exists(): self.create_table(extract_table, {"id": int, "value": str}, pk="id") columns[extract_column] = int foreign_keys_by_column[extract_column] = ForeignKey( name, extract_column, extract_table, "id" ) # Soundness check not_null, and defaults if provided not_null = not_null or set() defaults = defaults or {} assert columns, "Tables must have at least one column" assert all( n in columns for n in not_null ), "not_null set {} includes items not in columns {}".format( repr(not_null), repr(set(columns.keys())) ) assert all( n in columns for n in defaults ), "defaults set {} includes items not in columns {}".format( repr(set(defaults)), repr(set(columns.keys())) ) validate_column_names(columns.keys()) column_items = list(columns.items()) if column_order is not None: def sort_key(p): return column_order.index(p[0]) if p[0] in column_order else 999 column_items.sort(key=sort_key) if hash_id: column_items.insert(0, (hash_id, str)) pk = hash_id # Soundness check foreign_keys point to existing tables for fk in foreign_keys: if fk.other_table == name and columns.get(fk.other_column): continue if fk.other_column != "rowid" and not any( c for c in self[fk.other_table].columns if c.name == fk.other_column ): raise AlterError( "No such column: {}.{}".format(fk.other_table, fk.other_column) ) column_defs = [] # ensure pk is a tuple single_pk = None if isinstance(pk, list) and len(pk) == 1 and isinstance(pk[0], str): pk = pk[0] if isinstance(pk, str): single_pk = pk if pk not in [c[0] for c in column_items]: column_items.insert(0, (pk, int)) for column_name, column_type in column_items: column_extras = [] if column_name == single_pk: column_extras.append("PRIMARY KEY") if column_name in not_null: column_extras.append("NOT NULL") if column_name in defaults and defaults[column_name] is not None: column_extras.append( "DEFAULT {}".format(self.quote_default_value(defaults[column_name])) ) if column_name in foreign_keys_by_column: column_extras.append( "REFERENCES [{other_table}]([{other_column}])".format( other_table=foreign_keys_by_column[column_name].other_table, other_column=foreign_keys_by_column[column_name].other_column, ) ) column_defs.append( " [{column_name}] 
{column_type}{column_extras}".format( column_name=column_name, column_type=COLUMN_TYPE_MAPPING[column_type], column_extras=(" " + " ".join(column_extras)) if column_extras else "", ) ) extra_pk = "" if single_pk is None and pk and len(pk) > 1: extra_pk = ",\n PRIMARY KEY ({pks})".format( pks=", ".join(["[{}]".format(p) for p in pk]) ) columns_sql = ",\n".join(column_defs) sql = """CREATE TABLE {if_not_exists}[{table}] ( {columns_sql}{extra_pk} ); """.format( if_not_exists="IF NOT EXISTS " if if_not_exists else "", table=name, columns_sql=columns_sql, extra_pk=extra_pk, ) return sql def create_table( self, name: str, columns: Dict[str, Any], pk: Optional[Any] = None, foreign_keys: Optional[ForeignKeysType] = None, column_order: Optional[List[str]] = None, not_null: Optional[Iterable[str]] = None, defaults: Optional[Dict[str, Any]] = None, hash_id: Optional[str] = None, hash_id_columns: Optional[Iterable[str]] = None, extracts: Optional[Union[Dict[str, str], List[str]]] = None, if_not_exists: bool = False, replace: bool = False, ignore: bool = False, transform: bool = False, ) -> "Table": """ Create a table with the specified name and the specified ``{column_name: type}`` columns. See :ref:`python_api_explicit_create`. :param name: Name of table :param columns: Dictionary mapping column names to their types, for example ``{"name": str, "age": int}`` :param pk: String name of column to use as a primary key, or a tuple of strings for a compound primary key covering multiple columns :param foreign_keys: List of foreign key definitions for this table :param column_order: List specifying which columns should come first :param not_null: List of columns that should be created as ``NOT NULL`` :param defaults: Dictionary specifying default values for columns :param hash_id: Name of column to be used as a primary key containing a hash of the other columns :param hash_id_columns: List of columns to be used when calculating the hash ID for a row :param extracts: List or dictionary of columns to be extracted during inserts, see :ref:`python_api_extracts` :param if_not_exists: Use ``CREATE TABLE IF NOT EXISTS`` :param replace: Drop and replace table if it already exists :param ignore: Silently do nothing if table already exists :param transform: If table already exists transform it to fit the specified schema """ # Transform table to match the new definition if table already exists: if self[name].exists(): if ignore: return cast(Table, self[name]) elif replace: self[name].drop() if transform and self[name].exists(): table = cast(Table, self[name]) should_transform = False # First add missing columns and figure out columns to drop existing_columns = table.columns_dict missing_columns = dict( (col_name, col_type) for col_name, col_type in columns.items() if col_name not in existing_columns ) columns_to_drop = [ column for column in existing_columns if column not in columns ] if missing_columns: for col_name, col_type in missing_columns.items(): table.add_column(col_name, col_type) if missing_columns or columns_to_drop or columns != existing_columns: should_transform = True # Do we need to change the column order? if ( column_order and list(existing_columns)[: len(column_order)] != column_order ): should_transform = True # Has the primary key changed? current_pks = table.pks desired_pk = None if isinstance(pk, str): desired_pk = [pk] elif pk: desired_pk = list(pk) if desired_pk and current_pks != desired_pk: should_transform = True # Any not-null changes? 
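                # Any mismatch in the NOT NULL column set also forces a full
                # .transform(), since ALTER TABLE cannot change that in place.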
current_not_null = {c.name for c in table.columns if c.notnull} desired_not_null = set(not_null) if not_null else set() if current_not_null != desired_not_null: should_transform = True # How about defaults? if defaults and defaults != table.default_values: should_transform = True # Only run .transform() if there is something to do if should_transform: table.transform( types=columns, drop=columns_to_drop, column_order=column_order, not_null=not_null, defaults=defaults, pk=pk, ) return table sql = self.create_table_sql( name=name, columns=columns, pk=pk, foreign_keys=foreign_keys, column_order=column_order, not_null=not_null, defaults=defaults, hash_id=hash_id, hash_id_columns=hash_id_columns, extracts=extracts, if_not_exists=if_not_exists, ) self.execute(sql) created_table = self.table( name, pk=pk, foreign_keys=foreign_keys, column_order=column_order, not_null=not_null, defaults=defaults, hash_id=hash_id, hash_id_columns=hash_id_columns, ) return cast(Table, created_table) def rename_table(self, name: str, new_name: str): """ Rename a table. :param name: Current table name :param new_name: Name to rename it to """ self.execute( "ALTER TABLE [{name}] RENAME TO [{new_name}]".format( name=name, new_name=new_name ) ) def create_view( self, name: str, sql: str, ignore: bool = False, replace: bool = False ): """ Create a new SQL view with the specified name - ``sql`` should start with ``SELECT ...``. :param name: Name of the view :param sql: SQL ``SELECT`` query to use for this view. :param ignore: Set to ``True`` to do nothing if a view with this name already exists :param replace: Set to ``True`` to replace the view if one with this name already exists """ assert not ( ignore and replace ), "Use one or the other of ignore/replace, not both" create_sql = "CREATE VIEW {name} AS {sql}".format(name=name, sql=sql) if ignore or replace: # Does view exist already? if name in self.view_names(): if ignore: return self elif replace: # If SQL is the same, do nothing if create_sql == self[name].schema: return self self[name].drop() self.execute(create_sql) return self def m2m_table_candidates(self, table: str, other_table: str) -> List[str]: """ Given two table names returns the name of tables that could define a many-to-many relationship between those two tables, based on having foreign keys to both of the provided tables. :param table: Table name :param other_table: Other table name """ candidates = [] tables = {table, other_table} for table_obj in self.tables: # Does it have foreign keys to both table and other_table? has_fks_to = {fk.other_table for fk in table_obj.foreign_keys} if has_fks_to.issuperset(tables): candidates.append(table_obj.name) return candidates def add_foreign_keys(self, foreign_keys: Iterable[Tuple[str, str, str, str]]): """ See :ref:`python_api_add_foreign_keys`. 
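
        Example, adding a single foreign key between two illustrative tables::

            db.add_foreign_keys([
                ("books", "author_id", "authors", "id")
            ])
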
:param foreign_keys: A list of ``(table, column, other_table, other_column)`` tuples """ # foreign_keys is a list of explicit 4-tuples assert all( len(fk) == 4 and isinstance(fk, (list, tuple)) for fk in foreign_keys ), "foreign_keys must be a list of 4-tuples, (table, column, other_table, other_column)" foreign_keys_to_create = [] # Verify that all tables and columns exist for table, column, other_table, other_column in foreign_keys: if not self[table].exists(): raise AlterError("No such table: {}".format(table)) table_obj = self[table] if not isinstance(table_obj, Table): raise AlterError("Must be a table, not a view: {}".format(table)) table_obj = cast(Table, table_obj) if column not in table_obj.columns_dict: raise AlterError("No such column: {} in {}".format(column, table)) if not self[other_table].exists(): raise AlterError("No such other_table: {}".format(other_table)) if ( other_column != "rowid" and other_column not in self[other_table].columns_dict ): raise AlterError( "No such other_column: {} in {}".format(other_column, other_table) ) # We will silently skip foreign keys that exist already if not any( fk for fk in table_obj.foreign_keys if fk.column == column and fk.other_table == other_table and fk.other_column == other_column ): foreign_keys_to_create.append( (table, column, other_table, other_column) ) # Group them by table by_table: Dict[str, List] = {} for fk in foreign_keys_to_create: by_table.setdefault(fk[0], []).append(fk) for table, fks in by_table.items(): cast(Table, self[table]).transform(add_foreign_keys=fks) self.vacuum() def index_foreign_keys(self): "Create indexes for every foreign key column on every table in the database." for table_name in self.table_names(): table = self[table_name] existing_indexes = { i.columns[0] for i in table.indexes if len(i.columns) == 1 } for fk in table.foreign_keys: if fk.column not in existing_indexes: table.create_index([fk.column], find_unique_name=True) def vacuum(self): "Run a SQLite ``VACUUM`` against the database." self.execute("VACUUM;") def analyze(self, name=None): """ Run ``ANALYZE`` against the entire database or a named table or index. :param name: Run ``ANALYZE`` against this specific named table or index """ sql = "ANALYZE" if name is not None: sql += " [{}]".format(name) self.execute(sql) def iterdump(self) -> Generator[str, None, None]: "A sequence of strings representing a SQL dump of the database" if iterdump: yield from iterdump(self.conn) else: try: yield from self.conn.iterdump() except AttributeError: raise AttributeError( "conn.iterdump() not found - try pip install sqlite-dump" ) def init_spatialite(self, path: Optional[str] = None) -> bool: """ The ``init_spatialite`` method will load and initialize the SpatiaLite extension. The ``path`` argument should be an absolute path to the compiled extension, which can be found using ``find_spatialite``. Returns ``True`` if SpatiaLite was successfully initialized. .. code-block:: python from sqlite_utils.db import Database from sqlite_utils.utils import find_spatialite db = Database("mydb.db") db.init_spatialite(find_spatialite()) If you've installed SpatiaLite somewhere unexpected (for testing an alternate version, for example) you can pass in an absolute path: .. 
code-block:: python from sqlite_utils.db import Database from sqlite_utils.utils import find_spatialite db = Database("mydb.db") db.init_spatialite("./local/mod_spatialite.dylib") :param path: Path to SpatiaLite module on disk """ if path is None: path = find_spatialite() self.conn.enable_load_extension(True) self.conn.load_extension(path) # Initialize SpatiaLite if not yet initialized if "spatial_ref_sys" in self.table_names(): return False cursor = self.execute("select InitSpatialMetadata(1)") result = cursor.fetchone() return result and bool(result[0]) class Queryable: def exists(self) -> bool: "Does this table or view exist yet?" return False def __init__(self, db, name): self.db = db self.name = name def count_where( self, where: Optional[str] = None, where_args: Optional[Union[Iterable, dict]] = None, ) -> int: """ Executes ``SELECT count(*) FROM table WHERE ...`` and returns a count. :param where: SQL where fragment to use, for example ``id > ?`` :param where_args: Parameters to use with that fragment - an iterable for ``id > ?`` parameters, or a dictionary for ``id > :id`` """ sql = "select count(*) from [{}]".format(self.name) if where is not None: sql += " where " + where return self.db.execute(sql, where_args or []).fetchone()[0] def execute_count(self): # Backwards compatibility, see https://github.com/simonw/sqlite-utils/issues/305#issuecomment-890713185 return self.count_where() @property def count(self) -> int: "A count of the rows in this table or view." return self.count_where() @property def rows(self) -> Generator[dict, None, None]: "Iterate over every dictionaries for each row in this table or view." return self.rows_where() def rows_where( self, where: Optional[str] = None, where_args: Optional[Union[Iterable, dict]] = None, order_by: Optional[str] = None, select: str = "*", limit: Optional[int] = None, offset: Optional[int] = None, ) -> Generator[dict, None, None]: """ Iterate over every row in this table or view that matches the specified where clause. Returns each row as a dictionary. See :ref:`python_api_rows` for more details. :param where: SQL where fragment to use, for example ``id > ?`` :param where_args: Parameters to use with that fragment - an iterable for ``id > ?`` parameters, or a dictionary for ``id > :id`` :param order_by: Column or fragment of SQL to order by :param select: Comma-separated list of columns to select - defaults to ``*`` :param limit: Integer number of rows to limit to :param offset: Integer for SQL offset """ if not self.exists(): return sql = "select {} from [{}]".format(select, self.name) if where is not None: sql += " where " + where if order_by is not None: sql += " order by " + order_by if limit is not None: sql += " limit {}".format(limit) if offset is not None: sql += " offset {}".format(offset) cursor = self.db.execute(sql, where_args or []) columns = [c[0] for c in cursor.description] for row in cursor: yield dict(zip(columns, row)) def pks_and_rows_where( self, where: Optional[str] = None, where_args: Optional[Union[Iterable, dict]] = None, order_by: Optional[str] = None, limit: Optional[int] = None, offset: Optional[int] = None, ) -> Generator[Tuple[Any, Dict], None, None]: """ Like ``.rows_where()`` but returns ``(pk, row)`` pairs - ``pk`` can be a single value or tuple. 
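# A short sketch of the Queryable helpers described above (count, count_where,
# rows_where), assuming the db object from the earlier sketch and a "dogs"
# table with "name" and "age" columns:
#
#     dogs = db["dogs"]
#     print(dogs.count)
#     print(dogs.count_where("age > ?", [3]))
#     for row in dogs.rows_where("age > ?", [3], order_by="age desc", limit=10):
#         print(row["name"], row["age"])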
:param where: SQL where fragment to use, for example ``id > ?`` :param where_args: Parameters to use with that fragment - an iterable for ``id > ?`` parameters, or a dictionary for ``id > :id`` :param order_by: Column or fragment of SQL to order by :param select: Comma-separated list of columns to select - defaults to ``*`` :param limit: Integer number of rows to limit to :param offset: Integer for SQL offset """ column_names = [column.name for column in self.columns] pks = [column.name for column in self.columns if column.is_pk] if not pks: column_names.insert(0, "rowid") pks = ["rowid"] select = ",".join("[{}]".format(column_name) for column_name in column_names) for row in self.rows_where( select=select, where=where, where_args=where_args, order_by=order_by, limit=limit, offset=offset, ): row_pk = tuple(row[pk] for pk in pks) if len(row_pk) == 1: row_pk = row_pk[0] yield row_pk, row @property def columns(self) -> List["Column"]: "List of :ref:`Columns ` representing the columns in this table or view." if not self.exists(): return [] rows = self.db.execute("PRAGMA table_info([{}])".format(self.name)).fetchall() return [Column(*row) for row in rows] @property def columns_dict(self) -> Dict[str, Any]: "``{column_name: python-type}`` dictionary representing columns in this table or view." return {column.name: column_affinity(column.type) for column in self.columns} @property def schema(self) -> str: "SQL schema for this table or view." return self.db.execute( "select sql from sqlite_master where name = ?", (self.name,) ).fetchone()[0] class Table(Queryable): """ Tables should usually be initialized using the ``db.table(table_name)`` or ``db[table_name]`` methods. The following optional parameters can be passed to ``db.table(table_name, ...)``: :param db: Provided by ``db.table(table_name)`` :param name: Provided by ``db.table(table_name)`` :param pk: Name of the primary key column, or tuple of columns :param foreign_keys: List of foreign key definitions :param column_order: List of column names in the order they should be in the table :param not_null: List of columns that cannot be null :param defaults: Dictionary of column names and default values :param batch_size: Integer number of rows to insert at a time :param hash_id: If True, use a hash of the row values as the primary key :param hash_id_columns: List of columns to use for the hash_id :param alter: If True, automatically alter the table if it doesn't match the schema :param ignore: If True, ignore rows that already exist when inserting :param replace: If True, replace rows that already exist when inserting :param extracts: Dictionary or list of column names to extract into a separate table on inserts :param conversions: Dictionary of column names and conversion functions :param columns: Dictionary of column names to column types """ #: The ``rowid`` of the last inserted, updated or selected row. last_rowid: Optional[int] = None #: The primary key of the last inserted, updated or selected row. 
last_pk: Optional[Any] = None def __init__( self, db: Database, name: str, pk: Optional[Any] = None, foreign_keys: Optional[ForeignKeysType] = None, column_order: Optional[List[str]] = None, not_null: Optional[Iterable[str]] = None, defaults: Optional[Dict[str, Any]] = None, batch_size: int = 100, hash_id: Optional[str] = None, hash_id_columns: Optional[Iterable[str]] = None, alter: bool = False, ignore: bool = False, replace: bool = False, extracts: Optional[Union[Dict[str, str], List[str]]] = None, conversions: Optional[dict] = None, columns: Optional[Dict[str, Any]] = None, ): super().__init__(db, name) self._defaults = dict( pk=pk, foreign_keys=foreign_keys, column_order=column_order, not_null=not_null, defaults=defaults, batch_size=batch_size, hash_id=hash_id, hash_id_columns=hash_id_columns, alter=alter, ignore=ignore, replace=replace, extracts=extracts, conversions=conversions or {}, columns=columns, ) def __repr__(self) -> str: return "<Table {}{}>
".format( self.name, " (does not exist yet)" if not self.exists() else " ({})".format(", ".join(c.name for c in self.columns)), ) @property def count(self) -> int: "Count of the rows in this table - optionally from the table count cache, if configured." if self.db.use_counts_table: counts = self.db.cached_counts([self.name]) if counts: return next(iter(counts.values())) return self.count_where() def exists(self) -> bool: return self.name in self.db.table_names() @property def pks(self) -> List[str]: "Primary key columns for this table." names = [column.name for column in self.columns if column.is_pk] if not names: names = ["rowid"] return names @property def use_rowid(self) -> bool: "Does this table use ``rowid`` for its primary key (no other primary keys are specified)?" return not any(column for column in self.columns if column.is_pk) def get(self, pk_values: Union[list, tuple, str, int]) -> dict: """ Return row (as dictionary) for the specified primary key. Raises ``sqlite_utils.db.NotFoundError`` if a matching row cannot be found. :param pk_values: A single value, or a tuple of values for tables that have a compound primary key """ if not isinstance(pk_values, (list, tuple)): pk_values = [pk_values] pks = self.pks last_pk = pk_values[0] if len(pks) == 1 else pk_values if len(pks) != len(pk_values): raise NotFoundError( "Need {} primary key value{}".format( len(pks), "" if len(pks) == 1 else "s" ) ) wheres = ["[{}] = ?".format(pk_name) for pk_name in pks] rows = self.rows_where(" and ".join(wheres), pk_values) try: row = list(rows)[0] self.last_pk = last_pk return row except IndexError: raise NotFoundError @property def foreign_keys(self) -> List["ForeignKey"]: "List of foreign keys defined on this table." fks = [] for row in self.db.execute( "PRAGMA foreign_key_list([{}])".format(self.name) ).fetchall(): if row is not None: id, seq, table_name, from_, to_, on_update, on_delete, match = row fks.append( ForeignKey( table=self.name, column=from_, other_table=table_name, other_column=to_, ) ) return fks @property def virtual_table_using(self) -> Optional[str]: "Type of virtual table, or ``None`` if this is not a virtual table." match = _virtual_table_using_re.match(self.schema) if match is None: return None return match.groupdict()["using"].upper() @property def indexes(self) -> List[Index]: "List of indexes defined on this table." sql = 'PRAGMA index_list("{}")'.format(self.name) indexes = [] for row in self.db.execute_returning_dicts(sql): index_name = row["name"] index_name_quoted = ( '"{}"'.format(index_name) if not index_name.startswith('"') else index_name ) column_sql = "PRAGMA index_info({})".format(index_name_quoted) columns = [] for seqno, cid, name in self.db.execute(column_sql).fetchall(): columns.append(name) row["columns"] = columns # These columns may be missing on older SQLite versions: for key, default in {"origin": "c", "partial": 0}.items(): if key not in row: row[key] = default indexes.append(Index(**row)) return indexes @property def xindexes(self) -> List[XIndex]: "List of indexes defined on this table using the more detailed ``XIndex`` format." 
sql = 'PRAGMA index_list("{}")'.format(self.name) indexes = [] for row in self.db.execute_returning_dicts(sql): index_name = row["name"] index_name_quoted = ( '"{}"'.format(index_name) if not index_name.startswith('"') else index_name ) column_sql = "PRAGMA index_xinfo({})".format(index_name_quoted) index_columns = [] for info in self.db.execute(column_sql).fetchall(): index_columns.append(XIndexColumn(*info)) indexes.append(XIndex(index_name, index_columns)) return indexes @property def triggers(self) -> List[Trigger]: "List of triggers defined on this table." return [ Trigger(*r) for r in self.db.execute( "select name, tbl_name, sql from sqlite_master where type = 'trigger'" " and tbl_name = ?", (self.name,), ).fetchall() ] @property def triggers_dict(self) -> Dict[str, str]: "``{trigger_name: sql}`` dictionary of triggers defined on this table." return {trigger.name: trigger.sql for trigger in self.triggers} @property def default_values(self) -> Dict[str, Any]: "``{column_name: default_value}`` dictionary of default values for columns in this table." return { column.name: _decode_default_value(column.default_value) for column in self.columns if column.default_value is not None } @property def strict(self) -> bool: "Is this a STRICT table?" table_suffix = self.schema.split(")")[-1].strip().upper() table_options = [bit.strip() for bit in table_suffix.split(",")] return "STRICT" in table_options def create( self, columns: Dict[str, Any], pk: Optional[Any] = None, foreign_keys: Optional[ForeignKeysType] = None, column_order: Optional[List[str]] = None, not_null: Optional[Iterable[str]] = None, defaults: Optional[Dict[str, Any]] = None, hash_id: Optional[str] = None, hash_id_columns: Optional[Iterable[str]] = None, extracts: Optional[Union[Dict[str, str], List[str]]] = None, if_not_exists: bool = False, replace: bool = False, ignore: bool = False, transform: bool = False, ) -> "Table": """ Create a table with the specified columns. See :ref:`python_api_explicit_create` for full details. 
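# A quick introspection sketch for the properties described above; "plants" is
# an assumed table name:
#
#     plants = db["plants"]
#     print(plants.schema)          # CREATE TABLE statement
#     print(plants.columns_dict)    # {column_name: python type}
#     print(plants.indexes)         # list of Index namedtuples
#     print(plants.triggers_dict)   # {trigger_name: sql}
#     print(plants.default_values)  # {column_name: default value}
#     print(plants.strict)          # True for STRICT tables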
:param columns: Dictionary mapping column names to their types, for example ``{"name": str, "age": int}`` :param pk: String name of column to use as a primary key, or a tuple of strings for a compound primary key covering multiple columns :param foreign_keys: List of foreign key definitions for this table :param column_order: List specifying which columns should come first :param not_null: List of columns that should be created as ``NOT NULL`` :param defaults: Dictionary specifying default values for columns :param hash_id: Name of column to be used as a primary key containing a hash of the other columns :param hash_id_columns: List of columns to be used when calculating the hash ID for a row :param extracts: List or dictionary of columns to be extracted during inserts, see :ref:`python_api_extracts` :param if_not_exists: Use ``CREATE TABLE IF NOT EXISTS`` :param replace: Drop and replace table if it already exists :param ignore: Silently do nothing if table already exists :param transform: If table already exists transform it to fit the specified schema """ columns = {name: value for (name, value) in columns.items()} with self.db.conn: self.db.create_table( self.name, columns, pk=pk, foreign_keys=foreign_keys, column_order=column_order, not_null=not_null, defaults=defaults, hash_id=hash_id, hash_id_columns=hash_id_columns, extracts=extracts, if_not_exists=if_not_exists, replace=replace, ignore=ignore, transform=transform, ) return self def duplicate(self, new_name: str) -> "Table": """ Create a duplicate of this table, copying across the schema and all row data. :param new_name: Name of the new table """ if not self.exists(): raise NoTable(f"Table {self.name} does not exist") with self.db.conn: sql = "CREATE TABLE [{new_table}] AS SELECT * FROM [{table}];".format( new_table=new_name, table=self.name, ) self.db.execute(sql) return self.db[new_name] def transform( self, *, types: Optional[dict] = None, rename: Optional[dict] = None, drop: Optional[Iterable] = None, pk: Optional[Any] = DEFAULT, not_null: Optional[Iterable[str]] = None, defaults: Optional[Dict[str, Any]] = None, drop_foreign_keys: Optional[Iterable[str]] = None, add_foreign_keys: Optional[ForeignKeysType] = None, foreign_keys: Optional[ForeignKeysType] = None, column_order: Optional[List[str]] = None, keep_table: Optional[str] = None, ) -> "Table": """ Apply an advanced alter table, including operations that are not supported by ``ALTER TABLE`` in SQLite itself. See :ref:`python_api_transform` for full details. 
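# A sketch of an explicit .create() call using the parameters described above;
# the "cats" table and its columns are assumptions:
#
#     db["cats"].create(
#         {"id": int, "name": str, "score": float},
#         pk="id",
#         not_null={"name"},
#         defaults={"score": 0.0},
#         if_not_exists=True,
#     )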
:param types: Columns that should have their type changed, for example ``{"weight": float}`` :param rename: Columns to rename, for example ``{"headline": "title"}`` :param drop: Columns to drop :param pk: New primary key for the table :param not_null: Columns to set as ``NOT NULL`` :param defaults: Default values for columns :param drop_foreign_keys: Names of columns that should have their foreign key constraints removed :param add_foreign_keys: List of foreign keys to add to the table :param foreign_keys: List of foreign keys to set for the table, replacing any existing foreign keys :param column_order: List of strings specifying a full or partial column order to use when creating the table :param keep_table: If specified, the existing table will be renamed to this and will not be dropped """ assert self.exists(), "Cannot transform a table that doesn't exist yet" sqls = self.transform_sql( types=types, rename=rename, drop=drop, pk=pk, not_null=not_null, defaults=defaults, drop_foreign_keys=drop_foreign_keys, add_foreign_keys=add_foreign_keys, foreign_keys=foreign_keys, column_order=column_order, keep_table=keep_table, ) pragma_foreign_keys_was_on = self.db.execute("PRAGMA foreign_keys").fetchone()[ 0 ] try: if pragma_foreign_keys_was_on: self.db.execute("PRAGMA foreign_keys=0;") with self.db.conn: for sql in sqls: self.db.execute(sql) # Run the foreign_key_check before we commit if pragma_foreign_keys_was_on: self.db.execute("PRAGMA foreign_key_check;") finally: if pragma_foreign_keys_was_on: self.db.execute("PRAGMA foreign_keys=1;") return self def transform_sql( self, *, types: Optional[dict] = None, rename: Optional[dict] = None, drop: Optional[Iterable] = None, pk: Optional[Any] = DEFAULT, not_null: Optional[Iterable[str]] = None, defaults: Optional[Dict[str, Any]] = None, drop_foreign_keys: Optional[Iterable] = None, add_foreign_keys: Optional[ForeignKeysType] = None, foreign_keys: Optional[ForeignKeysType] = None, column_order: Optional[List[str]] = None, tmp_suffix: Optional[str] = None, keep_table: Optional[str] = None, ) -> List[str]: """ Return a list of SQL statements that should be executed in order to apply this transformation. 
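# A sketch of .transform() combining several of the operations listed above;
# the column names are illustrative assumptions:
#
#     db["cats"].transform(
#         types={"score": int},
#         rename={"name": "title"},
#         drop={"legacy_column"},
#         not_null={"title"},
#         column_order=["id", "title", "score"],
#     )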
:param types: Columns that should have their type changed, for example ``{"weight": float}`` :param rename: Columns to rename, for example ``{"headline": "title"}`` :param drop: Columns to drop :param pk: New primary key for the table :param not_null: Columns to set as ``NOT NULL`` :param defaults: Default values for columns :param drop_foreign_keys: Names of columns that should have their foreign key constraints removed :param add_foreign_keys: List of foreign keys to add to the table :param foreign_keys: List of foreign keys to set for the table, replacing any existing foreign keys :param column_order: List of strings specifying a full or partial column order to use when creating the table :param tmp_suffix: Suffix to use for the temporary table name :param keep_table: If specified, the existing table will be renamed to this and will not be dropped """ types = types or {} rename = rename or {} drop = drop or set() create_table_foreign_keys: List[ForeignKeyIndicator] = [] if foreign_keys is not None: if add_foreign_keys is not None: raise ValueError( "Cannot specify both foreign_keys and add_foreign_keys" ) if drop_foreign_keys is not None: raise ValueError( "Cannot specify both foreign_keys and drop_foreign_keys" ) create_table_foreign_keys.extend(foreign_keys) else: # Construct foreign_keys from current, plus add_foreign_keys, minus drop_foreign_keys create_table_foreign_keys = [] for table, column, other_table, other_column in self.foreign_keys: # Copy over old foreign keys, unless we are dropping them if (drop_foreign_keys is None) or (column not in drop_foreign_keys): create_table_foreign_keys.append( ForeignKey( table, rename.get(column) or column, other_table, other_column, ) ) # Add new foreign keys if add_foreign_keys is not None: for fk in self.db.resolve_foreign_keys(self.name, add_foreign_keys): create_table_foreign_keys.append( ForeignKey( self.name, rename.get(fk.column) or fk.column, fk.other_table, fk.other_column, ) ) new_table_name = "{}_new_{}".format( self.name, tmp_suffix or os.urandom(6).hex() ) current_column_pairs = list(self.columns_dict.items()) new_column_pairs = [] copy_from_to = {column: column for column, _ in current_column_pairs} for name, type_ in current_column_pairs: type_ = types.get(name) or type_ if name in drop: del [copy_from_to[name]] continue new_name = rename.get(name) or name new_column_pairs.append((new_name, type_)) copy_from_to[name] = new_name if pk is DEFAULT: pks_renamed = tuple( rename.get(p.name) or p.name for p in self.columns if p.is_pk ) if len(pks_renamed) == 1: pk = pks_renamed[0] else: pk = pks_renamed # not_null may be a set or dict, need to convert to a set create_table_not_null = { rename.get(c.name) or c.name for c in self.columns if c.notnull if c.name not in drop } if isinstance(not_null, dict): # Remove any columns with a value of False for key, value in not_null.items(): # Column may have been renamed key = rename.get(key) or key if value is False and key in create_table_not_null: create_table_not_null.remove(key) else: create_table_not_null.add(key) elif isinstance(not_null, set): create_table_not_null.update((rename.get(k) or k) for k in not_null) elif not not_null: pass else: assert False, "not_null must be a dict or a set or None, it was {}".format( repr(not_null) ) # defaults= create_table_defaults = { (rename.get(c.name) or c.name): c.default_value for c in self.columns if c.default_value is not None and c.name not in drop } if defaults is not None: create_table_defaults.update( {rename.get(c) or c: v for c, v in 
defaults.items()} ) if column_order is not None: column_order = [rename.get(col) or col for col in column_order] sqls = [] sqls.append( self.db.create_table_sql( new_table_name, dict(new_column_pairs), pk=pk, not_null=create_table_not_null, defaults=create_table_defaults, foreign_keys=create_table_foreign_keys, column_order=column_order, ).strip() ) # Copy across data, respecting any renamed columns new_cols = [] old_cols = [] for from_, to_ in copy_from_to.items(): old_cols.append(from_) new_cols.append(to_) # Ensure rowid is copied too if "rowid" not in new_cols: new_cols.insert(0, "rowid") old_cols.insert(0, "rowid") copy_sql = "INSERT INTO [{new_table}] ({new_cols})\n SELECT {old_cols} FROM [{old_table}];".format( new_table=new_table_name, old_table=self.name, old_cols=", ".join("[{}]".format(col) for col in old_cols), new_cols=", ".join("[{}]".format(col) for col in new_cols), ) sqls.append(copy_sql) # Drop (or keep) the old table if keep_table: sqls.append( "ALTER TABLE [{}] RENAME TO [{}];".format(self.name, keep_table) ) else: sqls.append("DROP TABLE [{}];".format(self.name)) # Rename the new one sqls.append( "ALTER TABLE [{}] RENAME TO [{}];".format(new_table_name, self.name) ) return sqls def extract( self, columns: Union[str, Iterable[str]], table: Optional[str] = None, fk_column: Optional[str] = None, rename: Optional[Dict[str, str]] = None, ) -> "Table": """ Extract specified columns into a separate table. See :ref:`python_api_extract` for details. :param columns: Single column or list of columns that should be extracted :param table: Name of table in which the new records should be created :param fk_column: Name of the foreign key column to populate in the original table :param rename: Dictionary of columns that should be renamed when populating the new table """ rename = rename or {} if isinstance(columns, str): columns = [columns] if not set(columns).issubset(self.columns_dict.keys()): raise InvalidColumns( "Invalid columns {} for table with columns {}".format( columns, list(self.columns_dict.keys()) ) ) table = table or "_".join(columns) lookup_table = self.db[table] fk_column = fk_column or "{}_id".format(table) magic_lookup_column = "{}_{}".format(fk_column, os.urandom(6).hex()) # Populate the lookup table with all of the extracted unique values lookup_columns_definition = { (rename.get(col) or col): typ for col, typ in self.columns_dict.items() if col in columns } if lookup_table.exists(): if not set(lookup_columns_definition.items()).issubset( lookup_table.columns_dict.items() ): raise InvalidColumns( "Lookup table {} already exists but does not have columns {}".format( table, lookup_columns_definition ) ) else: lookup_table.create( { **{ "id": int, }, **lookup_columns_definition, }, pk="id", ) lookup_columns = [(rename.get(col) or col) for col in columns] lookup_table.create_index(lookup_columns, unique=True, if_not_exists=True) self.db.execute( "INSERT OR IGNORE INTO [{lookup_table}] ({lookup_columns}) SELECT DISTINCT {table_cols} FROM [{table}]".format( lookup_table=table, lookup_columns=", ".join("[{}]".format(c) for c in lookup_columns), table_cols=", ".join("[{}]".format(c) for c in columns), table=self.name, ) ) # Now add the new fk_column self.add_column(magic_lookup_column, int) # And populate it self.db.execute( "UPDATE [{table}] SET [{magic_lookup_column}] = (SELECT id FROM [{lookup_table}] WHERE {where})".format( table=self.name, magic_lookup_column=magic_lookup_column, lookup_table=table, where=" AND ".join( "[{table}].[{column}] IS 
[{lookup_table}].[{lookup_column}]".format( table=self.name, lookup_table=table, column=column, lookup_column=rename.get(column) or column, ) for column in columns ), ) ) # Figure out the right column order column_order = [] for c in self.columns: if c.name in columns and magic_lookup_column not in column_order: column_order.append(magic_lookup_column) elif c.name == magic_lookup_column: continue else: column_order.append(c.name) # Drop the unnecessary columns and rename lookup column self.transform( drop=set(columns), rename={magic_lookup_column: fk_column}, column_order=column_order, ) # And add the foreign key constraint self.add_foreign_key(fk_column, table, "id") return self def create_index( self, columns: Iterable[Union[str, DescIndex]], index_name: Optional[str] = None, unique: bool = False, if_not_exists: bool = False, find_unique_name: bool = False, analyze: bool = False, ): """ Create an index on this table. :param columns: A single columns or list of columns to index. These can be strings or, to create an index using the column in descending order, ``db.DescIndex(column_name)`` objects. :param index_name: The name to use for the new index. Defaults to the column names joined on ``_``. :param unique: Should the index be marked as unique, forcing unique values? :param if_not_exists: Only create the index if one with that name does not already exist. :param find_unique_name: If ``index_name`` is not provided and the automatically derived name already exists, keep incrementing a suffix number to find an available name. :param analyze: Run ``ANALYZE`` against this index after creating it. See :ref:`python_api_create_index`. """ if index_name is None: index_name = "idx_{}_{}".format( self.name.replace(" ", "_"), "_".join(columns) ) columns_sql = [] for column in columns: if isinstance(column, DescIndex): fmt = "[{}] desc" else: fmt = "[{}]" columns_sql.append(fmt.format(column)) suffix = None created_index_name = None while True: created_index_name = ( "{}_{}".format(index_name, suffix) if suffix else index_name ) sql = ( textwrap.dedent( """ CREATE {unique}INDEX {if_not_exists}[{index_name}] ON [{table_name}] ({columns}); """ ) .strip() .format( index_name=created_index_name, table_name=self.name, columns=", ".join(columns_sql), unique="UNIQUE " if unique else "", if_not_exists="IF NOT EXISTS " if if_not_exists else "", ) ) try: self.db.execute(sql) break except OperationalError as e: # find_unique_name=True - try again if 'index ... already exists' arg = e.args[0] if ( find_unique_name and arg.startswith("index ") and arg.endswith(" already exists") ): if suffix is None: suffix = 2 else: suffix += 1 continue else: raise e if analyze: self.db.analyze(created_index_name) return self def add_column( self, col_name: str, col_type: Optional[Any] = None, fk: Optional[str] = None, fk_col: Optional[str] = None, not_null_default: Optional[Any] = None, ): """ Add a column to this table. See :ref:`python_api_add_column`. 
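# A combined sketch of .extract() and .create_index() as described above,
# assuming a "trees" table with a free-text "species" column:
#
#     trees = db["trees"]
#     trees.extract("species", table="species", fk_column="species_id")
#     trees.create_index(["species_id"], if_not_exists=True)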
:param col_name: Name of the new column :param col_type: Column type - a Python type such as ``str`` or a SQLite type string such as ``"BLOB"`` :param fk: Name of a table that this column should be a foreign key reference to :param fk_col: Column in the foreign key table that this should reference :param not_null_default: Set this column to ``not null`` and give it this default value """ fk_col_type = None if fk is not None: # fk must be a valid table if fk not in self.db.table_names(): raise AlterError("table '{}' does not exist".format(fk)) # if fk_col specified, must be a valid column if fk_col is not None: if fk_col not in self.db[fk].columns_dict: raise AlterError("table '{}' has no column {}".format(fk, fk_col)) else: # automatically set fk_col to first primary_key of fk table pks = [c for c in self.db[fk].columns if c.is_pk] if pks: fk_col = pks[0].name fk_col_type = pks[0].type else: fk_col = "rowid" fk_col_type = "INTEGER" if col_type is None: col_type = str not_null_sql = None if not_null_default is not None: not_null_sql = "NOT NULL DEFAULT {}".format( self.db.quote_default_value(not_null_default) ) sql = "ALTER TABLE [{table}] ADD COLUMN [{col_name}] {col_type}{not_null_default};".format( table=self.name, col_name=col_name, col_type=fk_col_type or COLUMN_TYPE_MAPPING[col_type], not_null_default=(" " + not_null_sql) if not_null_sql else "", ) self.db.execute(sql) if fk is not None: self.add_foreign_key(col_name, fk, fk_col) return self def drop(self, ignore: bool = False): """ Drop this table. :param ignore: Set to ``True`` to ignore the error if the table does not exist """ try: self.db.execute("DROP TABLE [{}]".format(self.name)) except sqlite3.OperationalError: if not ignore: raise def guess_foreign_table(self, column: str) -> str: """ For a given column, suggest another table that might be referenced by this column should it be used as a foreign key. For example, a column called ``tag_id`` or ``tag`` or ``tags`` might suggest a ``tag`` table, if one exists. If no candidates can be found, raises a ``NoObviousTable`` exception. :param column: Name of column """ column = column.lower() possibilities = [column] if column.endswith("_id"): column_without_id = column[:-3] possibilities.append(column_without_id) if not column_without_id.endswith("s"): possibilities.append(column_without_id + "s") elif not column.endswith("s"): possibilities.append(column + "s") existing_tables = {t.lower(): t for t in self.db.table_names()} for table in possibilities: if table in existing_tables: return existing_tables[table] # If we get here there's no obvious candidate - raise an error raise NoObviousTable( "No obvious foreign key table for column '{}' - tried {}".format( column, repr(possibilities) ) ) def guess_foreign_column(self, other_table: str): pks = [c for c in self.db[other_table].columns if c.is_pk] if len(pks) != 1: raise BadPrimaryKey( "Could not detect single primary key for table '{}'".format(other_table) ) else: return pks[0].name def add_foreign_key( self, column: str, other_table: Optional[str] = None, other_column: Optional[str] = None, ignore: bool = False, ): """ Alter the schema to mark the specified column as a foreign key to another table. :param column: The column to mark as a foreign key. :param other_table: The table it refers to - if omitted, will be guessed based on the column name. :param other_column: The column on the other table it - if omitted, will be guessed. 
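# A sketch of .add_column() and .add_foreign_key() as described above, assuming
# the "trees" table plus existing "people" and "genus" tables:
#
#     db["trees"].add_column("height_m", float)
#     # fk= both adds the column and marks it as a foreign key:
#     db["trees"].add_column("owner_id", int, fk="people", fk_col="id")
#     # Or mark a column that already exists as a foreign key:
#     db["trees"].add_foreign_key("genus_id", "genus", "id", ignore=True)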
:param ignore: Set this to ``True`` to ignore an existing foreign key - otherwise a ``AlterError`` will be raised. """ # Ensure column exists if column not in self.columns_dict: raise AlterError("No such column: {}".format(column)) # If other_table is not specified, attempt to guess it from the column if other_table is None: other_table = self.guess_foreign_table(column) # If other_column is not specified, detect the primary key on other_table if other_column is None: other_column = self.guess_foreign_column(other_table) # Soundness check that the other column exists if ( not [c for c in self.db[other_table].columns if c.name == other_column] and other_column != "rowid" ): raise AlterError("No such column: {}.{}".format(other_table, other_column)) # Check we do not already have an existing foreign key if any( fk for fk in self.foreign_keys if fk.column == column and fk.other_table == other_table and fk.other_column == other_column ): if ignore: return self else: raise AlterError( "Foreign key already exists for {} => {}.{}".format( column, other_table, other_column ) ) self.db.add_foreign_keys([(self.name, column, other_table, other_column)]) return self def enable_counts(self): """ Set up triggers to update a cache of the count of rows in this table. See :ref:`python_api_cached_table_counts` for details. """ sql = ( textwrap.dedent( """ {create_counts_table} CREATE TRIGGER IF NOT EXISTS [{table}{counts_table}_insert] AFTER INSERT ON [{table}] BEGIN INSERT OR REPLACE INTO [{counts_table}] VALUES ( {table_quoted}, COALESCE( (SELECT count FROM [{counts_table}] WHERE [table] = {table_quoted}), 0 ) + 1 ); END; CREATE TRIGGER IF NOT EXISTS [{table}{counts_table}_delete] AFTER DELETE ON [{table}] BEGIN INSERT OR REPLACE INTO [{counts_table}] VALUES ( {table_quoted}, COALESCE( (SELECT count FROM [{counts_table}] WHERE [table] = {table_quoted}), 0 ) - 1 ); END; INSERT OR REPLACE INTO _counts VALUES ({table_quoted}, (select count(*) from [{table}])); """ ) .strip() .format( create_counts_table=_COUNTS_TABLE_CREATE_SQL.format( self.db._counts_table_name ), counts_table=self.db._counts_table_name, table=self.name, table_quoted=self.db.quote(self.name), ) ) with self.db.conn: self.db.conn.executescript(sql) self.db.use_counts_table = True @property def has_counts_triggers(self) -> bool: "Does this table have triggers setup to update cached counts?" trigger_names = { "{table}{counts_table}_{suffix}".format( counts_table=self.db._counts_table_name, table=self.name, suffix=suffix ) for suffix in ["insert", "delete"] } return trigger_names.issubset(self.triggers_dict.keys()) def enable_fts( self, columns: Iterable[str], fts_version: str = "FTS5", create_triggers: bool = False, tokenize: Optional[str] = None, replace: bool = False, ): """ Enable SQLite full-text search against the specified columns. See :ref:`python_api_fts` for more details. :param columns: List of column names to include in the search index. :param fts_version: FTS version to use - defaults to ``FTS5`` but you may want ``FTS4`` for older SQLite versions. :param create_triggers: Should triggers be created to keep the search index up-to-date? Defaults to ``False``. :param tokenize: Custom SQLite tokenizer to use, for example ``"porter"`` to enable Porter stemming. :param replace: Should any existing FTS index for this table be replaced by the new one? 
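# A sketch of .enable_fts() with the options described above, assuming an
# "articles" table with "title" and "body" columns:
#
#     articles = db["articles"]
#     articles.enable_fts(["title", "body"], create_triggers=True, tokenize="porter")
#     # If rows were added while the triggers were not yet in place:
#     articles.populate_fts(["title", "body"])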
""" create_fts_sql = ( textwrap.dedent( """ CREATE VIRTUAL TABLE [{table}_fts] USING {fts_version} ( {columns},{tokenize} content=[{table}] ) """ ) .strip() .format( table=self.name, columns=", ".join("[{}]".format(c) for c in columns), fts_version=fts_version, tokenize="\n tokenize='{}',".format(tokenize) if tokenize else "", ) ) should_recreate = False if replace and self.db["{}_fts".format(self.name)].exists(): # Does the table need to be recreated? fts_schema = self.db["{}_fts".format(self.name)].schema if fts_schema != create_fts_sql: should_recreate = True expected_triggers = {self.name + suffix for suffix in ("_ai", "_ad", "_au")} existing_triggers = {t.name for t in self.triggers} has_triggers = existing_triggers.issuperset(expected_triggers) if has_triggers != create_triggers: should_recreate = True if not should_recreate: # Table with correct configuration already exists return self if should_recreate: self.disable_fts() self.db.executescript(create_fts_sql) self.populate_fts(columns) if create_triggers: old_cols = ", ".join("old.[{}]".format(c) for c in columns) new_cols = ", ".join("new.[{}]".format(c) for c in columns) triggers = ( textwrap.dedent( """ CREATE TRIGGER [{table}_ai] AFTER INSERT ON [{table}] BEGIN INSERT INTO [{table}_fts] (rowid, {columns}) VALUES (new.rowid, {new_cols}); END; CREATE TRIGGER [{table}_ad] AFTER DELETE ON [{table}] BEGIN INSERT INTO [{table}_fts] ([{table}_fts], rowid, {columns}) VALUES('delete', old.rowid, {old_cols}); END; CREATE TRIGGER [{table}_au] AFTER UPDATE ON [{table}] BEGIN INSERT INTO [{table}_fts] ([{table}_fts], rowid, {columns}) VALUES('delete', old.rowid, {old_cols}); INSERT INTO [{table}_fts] (rowid, {columns}) VALUES (new.rowid, {new_cols}); END; """ ) .strip() .format( table=self.name, columns=", ".join("[{}]".format(c) for c in columns), old_cols=old_cols, new_cols=new_cols, ) ) self.db.executescript(triggers) return self def populate_fts(self, columns: Iterable[str]) -> "Table": """ Update the associated SQLite full-text search index with the latest data from the table for the specified columns. :param columns: Columns to populate the data for """ sql = ( textwrap.dedent( """ INSERT INTO [{table}_fts] (rowid, {columns}) SELECT rowid, {columns} FROM [{table}]; """ ) .strip() .format( table=self.name, columns=", ".join("[{}]".format(c) for c in columns) ) ) self.db.executescript(sql) return self def disable_fts(self) -> "Table": "Remove any full-text search index and related triggers configured for this table." fts_table = self.detect_fts() if fts_table: self.db[fts_table].drop() # Now delete the triggers that related to that table sql = ( textwrap.dedent( """ SELECT name FROM sqlite_master WHERE type = 'trigger' AND sql LIKE '% INSERT INTO [{}]%' """ ) .strip() .format(fts_table) ) trigger_names = [] for row in self.db.execute(sql).fetchall(): trigger_names.append(row[0]) with self.db.conn: for trigger_name in trigger_names: self.db.execute("DROP TRIGGER IF EXISTS [{}]".format(trigger_name)) return self def rebuild_fts(self): "Run the ``rebuild`` operation against the associated full-text search index table." 
fts_table = self.detect_fts() if fts_table is None: # Assume this is itself an FTS table fts_table = self.name self.db.execute( "INSERT INTO [{table}]([{table}]) VALUES('rebuild');".format( table=fts_table ) ) return self def detect_fts(self) -> Optional[str]: "Detect if table has a corresponding FTS virtual table and return it" sql = textwrap.dedent( """ SELECT name FROM sqlite_master WHERE rootpage = 0 AND ( sql LIKE :like OR sql LIKE :like2 OR ( tbl_name = :table AND sql LIKE '%VIRTUAL TABLE%USING FTS%' ) ) """ ).strip() args = { "like": "%VIRTUAL TABLE%USING FTS%content=[{}]%".format(self.name), "like2": '%VIRTUAL TABLE%USING FTS%content="{}"%'.format(self.name), "table": self.name, } rows = self.db.execute(sql, args).fetchall() if len(rows) == 0: return None else: return rows[0][0] def optimize(self) -> "Table": "Run the ``optimize`` operation against the associated full-text search index table." fts_table = self.detect_fts() if fts_table is not None: self.db.execute( """ INSERT INTO [{table}] ([{table}]) VALUES ("optimize"); """.strip().format( table=fts_table ) ) return self def search_sql( self, columns: Optional[Iterable[str]] = None, order_by: Optional[str] = None, limit: Optional[int] = None, offset: Optional[int] = None, where: Optional[str] = None, include_rank: bool = False, ) -> str: """ " Return SQL string that can be used to execute searches against this table. :param columns: Columns to search against :param order_by: Column or SQL expression to sort by :param limit: SQL limit :param offset: SQL offset :param where: Extra SQL fragment for the WHERE clause :param include_rank: Select the search rank column in the final query """ # Pick names for table and rank column that don't clash original = "original_" if self.name == "original" else "original" columns_sql = "*" columns_with_prefix_sql = "[{}].*".format(original) if columns: columns_sql = ",\n ".join("[{}]".format(c) for c in columns) columns_with_prefix_sql = ",\n ".join( "[{}].[{}]".format(original, c) for c in columns ) fts_table = self.detect_fts() assert fts_table, "Full-text search is not configured for table '{}'".format( self.name ) virtual_table_using = self.db[fts_table].virtual_table_using sql = textwrap.dedent( """ with {original} as ( select rowid, {columns} from [{dbtable}]{where_clause} ) select {columns_with_prefix} from [{original}] join [{fts_table}] on [{original}].rowid = [{fts_table}].rowid where [{fts_table}] match :query order by {order_by} {limit_offset} """ ).strip() if virtual_table_using == "FTS5": rank_implementation = "[{}].rank".format(fts_table) else: self.db.register_fts4_bm25() rank_implementation = "rank_bm25(matchinfo([{}], 'pcnalx'))".format( fts_table ) if include_rank: columns_with_prefix_sql += ",\n " + rank_implementation + " rank" limit_offset = "" if limit is not None: limit_offset += " limit {}".format(limit) if offset is not None: limit_offset += " offset {}".format(offset) return sql.format( dbtable=self.name, where_clause="\n where {}".format(where) if where else "", original=original, columns=columns_sql, columns_with_prefix=columns_with_prefix_sql, fts_table=fts_table, order_by=order_by or rank_implementation, limit_offset=limit_offset.strip(), ).strip() def search( self, q: str, order_by: Optional[str] = None, columns: Optional[Iterable[str]] = None, limit: Optional[int] = None, offset: Optional[int] = None, where: Optional[str] = None, where_args: Optional[Union[Iterable, dict]] = None, quote: bool = False, ) -> Generator[dict, None, None]: """ Execute a search against 
this table using SQLite full-text search, returning a sequence of dictionaries for each row. :param q: Terms to search for :param order_by: Defaults to order by rank, or specify a column here. :param columns: List of columns to return, defaults to all columns. :param limit: Optional integer limit for returned rows. :param offset: Optional integer SQL offset. :param where: Extra SQL fragment for the WHERE clause :param where_args: Arguments to use for :param placeholders in the extra WHERE clause :param quote: Apply quoting to disable any special characters in the search query See :ref:`python_api_fts_search`. """ args = {"query": self.db.quote_fts(q) if quote else q} if where_args and "query" in where_args: raise ValueError( "'query' is a reserved key and cannot be passed to where_args for .search()" ) if where_args: args.update(where_args) cursor = self.db.execute( self.search_sql( order_by=order_by, columns=columns, limit=limit, offset=offset, where=where, ), args, ) columns = [c[0] for c in cursor.description] for row in cursor: yield dict(zip(columns, row)) def value_or_default(self, key, value): return self._defaults[key] if value is DEFAULT else value def delete(self, pk_values: Union[list, tuple, str, int, float]) -> "Table": """ Delete row matching the specified primary key. :param pk_values: A single value, or a tuple of values for tables that have a compound primary key """ if not isinstance(pk_values, (list, tuple)): pk_values = [pk_values] self.get(pk_values) wheres = ["[{}] = ?".format(pk_name) for pk_name in self.pks] sql = "delete from [{table}] where {wheres}".format( table=self.name, wheres=" and ".join(wheres) ) with self.db.conn: self.db.execute(sql, pk_values) return self def delete_where( self, where: Optional[str] = None, where_args: Optional[Union[Iterable, dict]] = None, analyze: bool = False, ) -> "Table": """ Delete rows matching the specified where clause, or delete all rows in the table. See :ref:`python_api_delete_where`. :param where: SQL where fragment to use, for example ``id > ?`` :param where_args: Parameters to use with that fragment - an iterable for ``id > ?`` parameters, or a dictionary for ``id > :id`` :param analyze: Set to ``True`` to run ``ANALYZE`` after the rows have been deleted. """ if not self.exists(): return self sql = "delete from [{}]".format(self.name) if where is not None: sql += " where " + where self.db.execute(sql, where_args or []) if analyze: self.analyze() return self def update( self, pk_values: Union[list, tuple, str, int, float], updates: Optional[dict] = None, alter: bool = False, conversions: Optional[dict] = None, ) -> "Table": """ Execute a SQL ``UPDATE`` against the specified row. See :ref:`python_api_update`. :param pk_values: The primary key of an individual record - can be a tuple if the table has a compound primary key. :param updates: A dictionary mapping columns to their updated values. :param alter: Set to ``True`` to add any missing columns. :param conversions: Optional dictionary of SQL functions to apply during the update, for example ``{"mycolumn": "upper(?)"}``. 
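# A sketch of .search(), .delete_where() and .update() as described above,
# continuing with the assumed "articles" table (FTS already enabled):
#
#     for row in db["articles"].search("sqlite", columns=["title"], limit=5, quote=True):
#         print(row["title"])
#     db["articles"].delete_where("created < ?", ["2010-01-01"])
#     db["articles"].update(1, {"title": "Revised title"}, alter=True)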
""" updates = updates or {} conversions = conversions or {} if not isinstance(pk_values, (list, tuple)): pk_values = [pk_values] # Soundness check that the record exists (raises error if not): self.get(pk_values) if not updates: return self args = [] sets = [] wheres = [] pks = self.pks validate_column_names(updates.keys()) for key, value in updates.items(): sets.append("[{}] = {}".format(key, conversions.get(key, "?"))) args.append(jsonify_if_needed(value)) wheres = ["[{}] = ?".format(pk_name) for pk_name in pks] args.extend(pk_values) sql = "update [{table}] set {sets} where {wheres}".format( table=self.name, sets=", ".join(sets), wheres=" and ".join(wheres) ) with self.db.conn: try: rowcount = self.db.execute(sql, args).rowcount except OperationalError as e: if alter and (" column" in e.args[0]): # Attempt to add any missing columns, then try again self.add_missing_columns([updates]) rowcount = self.db.execute(sql, args).rowcount else: raise # TODO: Test this works (rolls back) - use better exception: assert rowcount == 1 self.last_pk = pk_values[0] if len(pks) == 1 else pk_values return self def convert( self, columns: Union[str, List[str]], fn: Callable, output: Optional[str] = None, output_type: Optional[Any] = None, drop: bool = False, multi: bool = False, where: Optional[str] = None, where_args: Optional[Union[Iterable, dict]] = None, show_progress: bool = False, skip_false: bool = True, ): """ Apply conversion function ``fn`` to every value in the specified columns. :param columns: A single column or list of string column names to convert. :param fn: A callable that takes a single argument, ``value``, and returns it converted. :param output: Optional string column name to write the results to (defaults to the input column). :param output_type: If the output column needs to be created, this is the type that will be used for the new column. :param drop: Should the original column be dropped once the conversion is complete? :param multi: If ``True`` the return value of ``fn(value)`` will be expected to be a dictionary, and new columns will be created for each key of that dictionary. :param where: SQL fragment to use as a ``WHERE`` clause to limit the rows to which the conversion is applied, for example ``age > ?`` or ``age > :age``. :param where_args: List of arguments (if using ``?``) or a dictionary (if using ``:age``). :param show_progress: Should a progress bar be displayed? See :ref:`python_api_convert`. 
""" if isinstance(columns, str): columns = [columns] if multi: return self._convert_multi( columns[0], fn, drop=drop, where=where, where_args=where_args, show_progress=show_progress, ) if output is not None: assert len(columns) == 1, "output= can only be used with a single column" if output not in self.columns_dict: self.add_column(output, output_type or "text") todo_count = self.count_where(where, where_args) * len(columns) with progressbar(length=todo_count, silent=not show_progress) as bar: def convert_value(v): bar.update(1) if skip_false and not v: return v return jsonify_if_needed(fn(v)) fn_name = fn.__name__ if fn_name == "": fn_name = f"lambda_{abs(hash(fn))}" self.db.register_function(convert_value, name=fn_name) sql = "update [{table}] set {sets}{where};".format( table=self.name, sets=", ".join( [ "[{output_column}] = {fn_name}([{column}])".format( output_column=output or column, column=column, fn_name=fn_name, ) for column in columns ] ), where=" where {}".format(where) if where is not None else "", ) with self.db.conn: self.db.execute(sql, where_args or []) if drop: self.transform(drop=columns) return self def _convert_multi( self, column, fn, drop, show_progress, where=None, where_args=None ): # First we execute the function pk_to_values = {} new_column_types = {} pks = [column.name for column in self.columns if column.is_pk] if not pks: pks = ["rowid"] with progressbar( length=self.count, silent=not show_progress, label="1: Evaluating" ) as bar: for row in self.rows_where( select=", ".join( "[{}]".format(column_name) for column_name in (pks + [column]) ), where=where, where_args=where_args, ): row_pk = tuple(row[pk] for pk in pks) if len(row_pk) == 1: row_pk = row_pk[0] values = fn(row[column]) if values is not None and not isinstance(values, dict): raise BadMultiValues(values) if values: for key, value in values.items(): new_column_types.setdefault(key, set()).add(type(value)) pk_to_values[row_pk] = values bar.update(1) # Add any new columns columns_to_create = types_for_column_types(new_column_types) for column_name, column_type in columns_to_create.items(): if column_name not in self.columns_dict: self.add_column(column_name, column_type) # Run the updates with progressbar( length=self.count, silent=not show_progress, label="2: Updating" ) as bar: with self.db.conn: for pk, updates in pk_to_values.items(): self.update(pk, updates) bar.update(1) if drop: self.transform(drop=(column,)) def build_insert_queries_and_params( self, extracts, chunk, all_columns, hash_id, hash_id_columns, upsert, pk, not_null, conversions, num_records_processed, replace, ignore, ): # values is the list of insert data that is passed to the # .execute() method - but some of them may be replaced by # new primary keys if we are extracting any columns. 
values = [] if hash_id_columns and hash_id is None: hash_id = "id" extracts = resolve_extracts(extracts) for record in chunk: record_values = [] for key in all_columns: value = jsonify_if_needed( record.get( key, None if key != hash_id else hash_record(record, hash_id_columns), ) ) if key in extracts: extract_table = extracts[key] value = self.db[extract_table].lookup({"value": value}) record_values.append(value) values.append(record_values) queries_and_params = [] if upsert: if isinstance(pk, str): pks = [pk] else: pks = pk self.last_pk = None for record_values in values: record = dict(zip(all_columns, record_values)) placeholders = list(pks) # Need to populate not-null columns too, or INSERT OR IGNORE ignores # them since it ignores the resulting integrity errors if not_null: placeholders.extend(not_null) sql = "INSERT OR IGNORE INTO [{table}]({cols}) VALUES({placeholders});".format( table=self.name, cols=", ".join(["[{}]".format(p) for p in placeholders]), placeholders=", ".join(["?" for p in placeholders]), ) queries_and_params.append( (sql, [record[col] for col in pks] + ["" for _ in (not_null or [])]) ) # UPDATE [book] SET [name] = 'Programming' WHERE [id] = 1001; set_cols = [col for col in all_columns if col not in pks] if set_cols: sql2 = "UPDATE [{table}] SET {pairs} WHERE {wheres}".format( table=self.name, pairs=", ".join( "[{}] = {}".format(col, conversions.get(col, "?")) for col in set_cols ), wheres=" AND ".join("[{}] = ?".format(pk) for pk in pks), ) queries_and_params.append( ( sql2, [record[col] for col in set_cols] + [record[pk] for pk in pks], ) ) # We can populate .last_pk right here if num_records_processed == 1: self.last_pk = tuple(record[pk] for pk in pks) if len(self.last_pk) == 1: self.last_pk = self.last_pk[0] else: or_what = "" if replace: or_what = "OR REPLACE " elif ignore: or_what = "OR IGNORE " sql = """ INSERT {or_what}INTO [{table}] ({columns}) VALUES {rows}; """.strip().format( or_what=or_what, table=self.name, columns=", ".join("[{}]".format(c) for c in all_columns), rows=", ".join( "({placeholders})".format( placeholders=", ".join( [conversions.get(col, "?") for col in all_columns] ) ) for record in chunk ), ) flat_values = list(itertools.chain(*values)) queries_and_params = [(sql, flat_values)] return queries_and_params def insert_chunk( self, alter, extracts, chunk, all_columns, hash_id, hash_id_columns, upsert, pk, not_null, conversions, num_records_processed, replace, ignore, ): queries_and_params = self.build_insert_queries_and_params( extracts, chunk, all_columns, hash_id, hash_id_columns, upsert, pk, not_null, conversions, num_records_processed, replace, ignore, ) with self.db.conn: result = None for query, params in queries_and_params: try: result = self.db.execute(query, params) except OperationalError as e: if alter and (" column" in e.args[0]): # Attempt to add any missing columns, then try again self.add_missing_columns(chunk) result = self.db.execute(query, params) elif e.args[0] == "too many SQL variables": first_half = chunk[: len(chunk) // 2] second_half = chunk[len(chunk) // 2 :] self.insert_chunk( alter, extracts, first_half, all_columns, hash_id, hash_id_columns, upsert, pk, not_null, conversions, num_records_processed, replace, ignore, ) self.insert_chunk( alter, extracts, second_half, all_columns, hash_id, hash_id_columns, upsert, pk, not_null, conversions, num_records_processed, replace, ignore, ) else: raise if num_records_processed == 1 and not upsert: self.last_rowid = result.lastrowid self.last_pk = self.last_rowid # 
self.last_rowid will be 0 if a "INSERT OR IGNORE" happened if (hash_id or pk) and self.last_rowid: row = list(self.rows_where("rowid = ?", [self.last_rowid]))[0] if hash_id: self.last_pk = row[hash_id] elif isinstance(pk, str): self.last_pk = row[pk] else: self.last_pk = tuple(row[p] for p in pk) return def insert( self, record: Dict[str, Any], pk=DEFAULT, foreign_keys=DEFAULT, column_order: Optional[Union[List[str], Default]] = DEFAULT, not_null: Optional[Union[Iterable[str], Default]] = DEFAULT, defaults: Optional[Union[Dict[str, Any], Default]] = DEFAULT, hash_id: Optional[Union[str, Default]] = DEFAULT, hash_id_columns: Optional[Union[Iterable[str], Default]] = DEFAULT, alter: Optional[Union[bool, Default]] = DEFAULT, ignore: Optional[Union[bool, Default]] = DEFAULT, replace: Optional[Union[bool, Default]] = DEFAULT, extracts: Optional[Union[Dict[str, str], List[str], Default]] = DEFAULT, conversions: Optional[Union[Dict[str, str], Default]] = DEFAULT, columns: Optional[Union[Dict[str, Any], Default]] = DEFAULT, ) -> "Table": """ Insert a single record into the table. The table will be created with a schema that matches the inserted record if it does not already exist, see :ref:`python_api_creating_tables`. - ``record`` - required: a dictionary representing the record to be inserted. The other parameters are optional, and mostly influence how the new table will be created if that table does not exist yet. Each of them defaults to ``DEFAULT``, which indicates that the default setting for the current ``Table`` object (specified in the table constructor) should be used. :param record: Dictionary record to be inserted :param pk: If creating the table, which column should be the primary key. :param foreign_keys: See :ref:`python_api_foreign_keys`. :param column_order: List of strings specifying a full or partial column order to use when creating the table. :param not_null: Set of strings specifying columns that should be ``NOT NULL``. :param defaults: Dictionary specifying default values for specific columns. :param hash_id: Name of a column to create and use as a primary key, where the value of thet primary key will be derived as a SHA1 hash of the other column values in the record. ``hash_id="id"`` is a common column name used for this. :param alter: Boolean, should any missing columns be added automatically? :param ignore: Boolean, if a record already exists with this primary key, ignore this insert. :param replace: Boolean, if a record already exists with this primary key, replace it with this new record. :param extracts: A list of columns to extract to other tables, or a dictionary that maps ``{column_name: other_table_name}``. See :ref:`python_api_extracts`. :param conversions: Dictionary specifying SQL conversion functions to be applied to the data while it is being inserted, for example ``{"name": "upper(?)"}``. See :ref:`python_api_conversions`. :param columns: Dictionary over-riding the detected types used for the columns, for example ``{"age": int, "weight": float}``. 
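# A sketch of .insert() using a few of the options described above; the "dogs"
# table and its columns are assumptions:
#
#     db["dogs"].insert(
#         {"id": 1, "name": "Cleo", "age": 4},
#         pk="id",
#         column_order=["id", "name", "age"],
#         replace=True,
#     )
#     print(db["dogs"].last_pk)  # 1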
""" return self.insert_all( [record], pk=pk, foreign_keys=foreign_keys, column_order=column_order, not_null=not_null, defaults=defaults, hash_id=hash_id, hash_id_columns=hash_id_columns, alter=alter, ignore=ignore, replace=replace, extracts=extracts, conversions=conversions, columns=columns, ) def insert_all( self, records, pk=DEFAULT, foreign_keys=DEFAULT, column_order=DEFAULT, not_null=DEFAULT, defaults=DEFAULT, batch_size=DEFAULT, hash_id=DEFAULT, hash_id_columns=DEFAULT, alter=DEFAULT, ignore=DEFAULT, replace=DEFAULT, truncate=False, extracts=DEFAULT, conversions=DEFAULT, columns=DEFAULT, upsert=False, analyze=False, ) -> "Table": """ Like ``.insert()`` but takes a list of records and ensures that the table that it creates (if table does not exist) has columns for ALL of that data. Use ``analyze=True`` to run ``ANALYZE`` after the insert has completed. """ pk = self.value_or_default("pk", pk) foreign_keys = self.value_or_default("foreign_keys", foreign_keys) column_order = self.value_or_default("column_order", column_order) not_null = self.value_or_default("not_null", not_null) defaults = self.value_or_default("defaults", defaults) batch_size = self.value_or_default("batch_size", batch_size) hash_id = self.value_or_default("hash_id", hash_id) hash_id_columns = self.value_or_default("hash_id_columns", hash_id_columns) alter = self.value_or_default("alter", alter) ignore = self.value_or_default("ignore", ignore) replace = self.value_or_default("replace", replace) extracts = self.value_or_default("extracts", extracts) conversions = self.value_or_default("conversions", conversions) or {} columns = self.value_or_default("columns", columns) if hash_id_columns and hash_id is None: hash_id = "id" if upsert and (not pk and not hash_id): raise PrimaryKeyRequired("upsert() requires a pk") assert not (hash_id and pk), "Use either pk= or hash_id=" if hash_id_columns and (hash_id is None): hash_id = "id" if hash_id: pk = hash_id assert not ( ignore and replace ), "Use either ignore=True or replace=True, not both" all_columns = [] first = True num_records_processed = 0 # Fix up any records with square braces in the column names records = fix_square_braces(records) # We can only handle a max of 999 variables in a SQL insert, so # we need to adjust the batch_size down if we have too many cols records = iter(records) # Peek at first record to count its columns: try: first_record = next(records) except StopIteration: return self # It was an empty list num_columns = len(first_record.keys()) assert ( num_columns <= SQLITE_MAX_VARS ), "Rows can have a maximum of {} columns".format(SQLITE_MAX_VARS) batch_size = max(1, min(batch_size, SQLITE_MAX_VARS // num_columns)) self.last_rowid = None self.last_pk = None if truncate and self.exists(): self.db.execute("DELETE FROM [{}];".format(self.name)) for chunk in chunks(itertools.chain([first_record], records), batch_size): chunk = list(chunk) num_records_processed += len(chunk) if first: if not self.exists(): # Use the first batch to derive the table names column_types = suggest_column_types(chunk) column_types.update(columns or {}) self.create( column_types, pk, foreign_keys, column_order=column_order, not_null=not_null, defaults=defaults, hash_id=hash_id, hash_id_columns=hash_id_columns, extracts=extracts, ) all_columns_set = set() for record in chunk: all_columns_set.update(record.keys()) all_columns = list(sorted(all_columns_set)) if hash_id: all_columns.insert(0, hash_id) else: for record in chunk: all_columns += [ column for column in record if column not in 
all_columns ] first = False self.insert_chunk( alter, extracts, chunk, all_columns, hash_id, hash_id_columns, upsert, pk, not_null, conversions, num_records_processed, replace, ignore, ) if analyze: self.analyze() return self def upsert( self, record, pk=DEFAULT, foreign_keys=DEFAULT, column_order=DEFAULT, not_null=DEFAULT, defaults=DEFAULT, hash_id=DEFAULT, hash_id_columns=DEFAULT, alter=DEFAULT, extracts=DEFAULT, conversions=DEFAULT, columns=DEFAULT, ) -> "Table": """ Like ``.insert()`` but performs an ``UPSERT``, where records are inserted if they do not exist and updated if they DO exist, based on matching against their primary key. See :ref:`python_api_upsert`. """ return self.upsert_all( [record], pk=pk, foreign_keys=foreign_keys, column_order=column_order, not_null=not_null, defaults=defaults, hash_id=hash_id, hash_id_columns=hash_id_columns, alter=alter, extracts=extracts, conversions=conversions, columns=columns, ) def upsert_all( self, records, pk=DEFAULT, foreign_keys=DEFAULT, column_order=DEFAULT, not_null=DEFAULT, defaults=DEFAULT, batch_size=DEFAULT, hash_id=DEFAULT, hash_id_columns=DEFAULT, alter=DEFAULT, extracts=DEFAULT, conversions=DEFAULT, columns=DEFAULT, analyze=False, ) -> "Table": """ Like ``.upsert()`` but can be applied to a list of records. """ return self.insert_all( records, pk=pk, foreign_keys=foreign_keys, column_order=column_order, not_null=not_null, defaults=defaults, batch_size=batch_size, hash_id=hash_id, hash_id_columns=hash_id_columns, alter=alter, extracts=extracts, conversions=conversions, columns=columns, upsert=True, analyze=analyze, ) def add_missing_columns(self, records: Iterable[Dict[str, Any]]) -> "Table": needed_columns = suggest_column_types(records) current_columns = {c.lower() for c in self.columns_dict} for col_name, col_type in needed_columns.items(): if col_name.lower() not in current_columns: self.add_column(col_name, col_type) return self def lookup( self, lookup_values: Dict[str, Any], extra_values: Optional[Dict[str, Any]] = None, pk: Optional[str] = "id", foreign_keys: Optional[ForeignKeysType] = None, column_order: Optional[List[str]] = None, not_null: Optional[Iterable[str]] = None, defaults: Optional[Dict[str, Any]] = None, extracts: Optional[Union[Dict[str, str], List[str]]] = None, conversions: Optional[Dict[str, str]] = None, columns: Optional[Dict[str, Any]] = None, ): """ Create or populate a lookup table with the specified values. ``db["Species"].lookup({"name": "Palm"})`` will create a table called ``Species`` (if one does not already exist) with two columns: ``id`` and ``name``. It will set up a unique constraint on the ``name`` column to guarantee it will not contain duplicate rows. It will then insert a new row with the ``name`` set to ``Palm`` and return the new integer primary key value. An optional second argument can be provided with more ``name: value`` pairs to be included only if the record is being created for the first time. These will be ignored on subsequent lookup calls for records that already exist. All other keyword arguments are passed through to ``.insert()``. See :ref:`python_api_lookup_tables` for more details. 
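A minimal usage sketch (assumes an in-memory ``Database``; the ``Species`` table and
``name`` column are only illustrative):

.. code-block:: python

    from sqlite_utils import Database

    db = Database(memory=True)

    # First call creates the table and returns the new integer primary key:
    palm_id = db["Species"].lookup({"name": "Palm"})

    # Repeating the call returns the existing id rather than inserting a duplicate:
    assert db["Species"].lookup({"name": "Palm"}) == palm_id

    # extra_values are only stored when the record is created for the first time:
    oak_id = db["Species"].lookup({"name": "Oak"}, {"first_seen": "2023"})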
:param lookup_values: Dictionary specifying column names and values to use for the lookup :param extra_values: Additional column values to be used only if creating a new record """ assert isinstance(lookup_values, dict) if extra_values is not None: assert isinstance(extra_values, dict) combined_values = dict(lookup_values) if extra_values is not None: combined_values.update(extra_values) if self.exists(): self.add_missing_columns([combined_values]) unique_column_sets = [set(i.columns) for i in self.indexes] if set(lookup_values.keys()) not in unique_column_sets: self.create_index(lookup_values.keys(), unique=True) wheres = ["[{}] = ?".format(column) for column in lookup_values] rows = list( self.rows_where( " and ".join(wheres), [value for _, value in lookup_values.items()] ) ) try: return rows[0][pk] except IndexError: return self.insert( combined_values, pk=pk, foreign_keys=foreign_keys, column_order=column_order, not_null=not_null, defaults=defaults, extracts=extracts, conversions=conversions, columns=columns, ).last_pk else: pk = self.insert( combined_values, pk=pk, foreign_keys=foreign_keys, column_order=column_order, not_null=not_null, defaults=defaults, extracts=extracts, conversions=conversions, columns=columns, ).last_pk self.create_index(lookup_values.keys(), unique=True) return pk def m2m( self, other_table: Union[str, "Table"], record_or_iterable: Optional[ Union[Iterable[Dict[str, Any]], Dict[str, Any]] ] = None, pk: Optional[Union[Any, Default]] = DEFAULT, lookup: Optional[Dict[str, Any]] = None, m2m_table: Optional[str] = None, alter: bool = False, ): """ After inserting a record in a table, create one or more records in some other table and then create many-to-many records linking the original record and the newly created records together. For example:: db["dogs"].insert({"id": 1, "name": "Cleo"}, pk="id").m2m( "humans", {"id": 1, "name": "Natalie"}, pk="id" ) See :ref:`python_api_m2m` for details. :param other_table: The name of the table to insert the new records into. :param record_or_iterable: A single dictionary record to insert, or a list of records. :param pk: The primary key to use if creating ``other_table``. :param lookup: Same dictionary as for ``.lookup()``, to create a many-to-many lookup table. :param m2m_table: The string name to use for the many-to-many table, defaults to creating this automatically based on the names of the two tables. :param alter: Set to ``True`` to add any missing columns on ``other_table`` if that table already exists. 
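A sketch of the ``lookup=`` variant, which combines ``.m2m()`` with ``.lookup()``
(assumes ``db`` is an existing ``Database``; table and column names are illustrative):

.. code-block:: python

    db["dogs"].insert({"id": 1, "name": "Cleo"}, pk="id").m2m(
        "humans", lookup={"name": "Natalie"}
    )
    # Creates (if needed) a unique "humans" lookup row for Natalie and a
    # "dogs_humans" table linking the two records together.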
""" if isinstance(other_table, str): other_table = cast(Table, self.db.table(other_table, pk=pk)) our_id = self.last_pk if lookup is not None: assert record_or_iterable is None, "Provide lookup= or record, not both" else: assert record_or_iterable is not None, "Provide lookup= or record, not both" tables = list(sorted([self.name, other_table.name])) columns = ["{}_id".format(t) for t in tables] if m2m_table is not None: m2m_table_name = m2m_table else: # Detect if there is a single, unambiguous option candidates = self.db.m2m_table_candidates(self.name, other_table.name) if len(candidates) == 1: m2m_table_name = candidates[0] elif len(candidates) > 1: raise NoObviousTable( "No single obvious m2m table for {}, {} - use m2m_table= parameter".format( self.name, other_table.name ) ) else: # If not, create a new table m2m_table_name = m2m_table or "{}_{}".format(*tables) m2m_table_obj = self.db.table(m2m_table_name, pk=columns, foreign_keys=columns) if lookup is None: # if records is only one record, put the record in a list if isinstance(record_or_iterable, Mapping): records = [record_or_iterable] else: records = cast(List, record_or_iterable) # Ensure each record exists in other table for record in records: id = other_table.insert( cast(dict, record), pk=pk, replace=True, alter=alter ).last_pk m2m_table_obj.insert( { "{}_id".format(other_table.name): id, "{}_id".format(self.name): our_id, }, replace=True, ) else: id = other_table.lookup(lookup) m2m_table_obj.insert( { "{}_id".format(other_table.name): id, "{}_id".format(self.name): our_id, }, replace=True, ) return self def analyze(self): "Run ANALYZE against this table" self.db.analyze(self.name) def analyze_column( self, column: str, common_limit: int = 10, value_truncate=None, total_rows=None, most_common: bool = True, least_common: bool = True, ) -> "ColumnDetails": """ Return statistics about the specified column. See :ref:`python_api_analyze_column`. :param column: Column to analyze :param common_limit: Show this many column values :param value_truncate: Truncate display of common values to this many characters :param total_rows: Optimization - pass the total number of rows in the table to save running a fresh ``count(*)`` query :param most_common: If ``True``, calculate the most common values :param least_common: If ``True``, calculate the least common values """ db = self.db table = self.name if total_rows is None: total_rows = db[table].count def truncate(value): if value_truncate is None or isinstance(value, (float, int)): return value value = str(value) if len(value) > value_truncate: value = value[:value_truncate] + "..." 
return value num_null = db.execute( "select count(*) from [{}] where [{}] is null".format(table, column) ).fetchone()[0] num_blank = db.execute( "select count(*) from [{}] where [{}] = ''".format(table, column) ).fetchone()[0] num_distinct = db.execute( "select count(distinct [{}]) from [{}]".format(column, table) ).fetchone()[0] most_common_results = None least_common_results = None if num_distinct == 1: value = db.execute( "select [{}] from [{}] limit 1".format(column, table) ).fetchone()[0] most_common_results = [(truncate(value), total_rows)] elif num_distinct != total_rows: if most_common: # Optimization - if all rows are null, don't run this query if num_null == total_rows: most_common_results = [(None, total_rows)] else: most_common_results = [ (truncate(r[0]), r[1]) for r in db.execute( "select [{}], count(*) from [{}] group by [{}] order by count(*) desc, [{}] limit {}".format( column, table, column, column, common_limit ) ).fetchall() ] most_common_results.sort(key=lambda p: (p[1], p[0]), reverse=True) if least_common: if num_distinct <= common_limit: # No need to run the query if it will just return the results in reverse order least_common_results = None else: least_common_results = [ (truncate(r[0]), r[1]) for r in db.execute( "select [{}], count(*) from [{}] group by [{}] order by count(*), [{}] desc limit {}".format( column, table, column, column, common_limit ) ).fetchall() ] least_common_results.sort(key=lambda p: (p[1], p[0])) return ColumnDetails( self.name, column, total_rows, num_null, num_blank, num_distinct, most_common_results, least_common_results, ) def add_geometry_column( self, column_name: str, geometry_type: str, srid: int = 4326, coord_dimension: str = "XY", not_null: bool = False, ) -> bool: """ In SpatiaLite, a geometry column can only be added to an existing table. To do so, use ``table.add_geometry_column``, passing in a geometry type. By default, this will add a nullable column using `SRID 4326 `__. This can be customized using the ``column_name``, ``srid`` and ``not_null`` arguments. Returns ``True`` if the column was successfully added, ``False`` if not. .. code-block:: python from sqlite_utils.db import Database from sqlite_utils.utils import find_spatialite db = Database("mydb.db") db.init_spatialite(find_spatialite()) # the table must exist before adding a geometry column table = db["locations"].create({"name": str}) table.add_geometry_column("geometry", "POINT") :param column_name: Name of column to add :param geometry_type: Type of geometry column, for example ``"GEOMETRY"`` or ``"POINT" or ``"POLYGON"`` :param srid: Integer SRID, defaults to 4326 for WGS84 :param coord_dimension: Dimensions to use, defaults to ``"XY"`` - set to ``"XYZ"`` to work in three dimensions :param not_null: Should the column be ``NOT NULL`` """ cursor = self.db.execute( "SELECT AddGeometryColumn(?, ?, ?, ?, ?, ?);", [ self.name, column_name, srid, geometry_type, coord_dimension, int(not_null), ], ) result = cursor.fetchone() return result and bool(result[0]) def create_spatial_index(self, column_name) -> bool: """ A spatial index allows for significantly faster bounding box queries. To create one, use ``create_spatial_index`` with the name of an existing geometry column. Returns ``True`` if the index was successfully created, ``False`` if not. Calling this function if an index already exists is a no-op. .. 
code-block:: python # assuming SpatiaLite is loaded, create the table, add the column table = db["locations"].create({"name": str}) table.add_geometry_column("geometry", "POINT") # now we can index it table.create_spatial_index("geometry") # the spatial index is a virtual table, which we can inspect print(db["idx_locations_geometry"].schema) # outputs: # CREATE VIRTUAL TABLE "idx_locations_geometry" USING rtree(pkid, xmin, xmax, ymin, ymax) :param column_name: Geometry column to create the spatial index against """ if f"idx_{self.name}_{column_name}" in self.db.table_names(): return False cursor = self.db.execute( "select CreateSpatialIndex(?, ?)", [self.name, column_name] ) result = cursor.fetchone() return result and bool(result[0]) class View(Queryable): def exists(self): return True def __repr__(self) -> str: return "".format( self.name, ", ".join(c.name for c in self.columns) ) def drop(self, ignore=False): """ Drop this view. :param ignore: Set to ``True`` to ignore the error if the view does not exist """ try: self.db.execute("DROP VIEW [{}]".format(self.name)) except sqlite3.OperationalError: if not ignore: raise def enable_fts(self, *args, **kwargs): "``enable_fts()`` is supported on tables but not on views." raise NotImplementedError( "enable_fts() is supported on tables but not on views" ) def jsonify_if_needed(value): if isinstance(value, decimal.Decimal): return float(value) if isinstance(value, (dict, list, tuple)): return json.dumps(value, default=repr, ensure_ascii=False) elif isinstance(value, (datetime.time, datetime.date, datetime.datetime)): return value.isoformat() elif isinstance(value, datetime.timedelta): return str(value) elif isinstance(value, uuid.UUID): return str(value) else: return value def resolve_extracts( extracts: Optional[Union[Dict[str, str], List[str], Tuple[str]]] ) -> dict: if extracts is None: extracts = {} if isinstance(extracts, (list, tuple)): extracts = {item: item for item in extracts} return extracts def validate_column_names(columns): # Validate no columns contain '[' or ']' - #86 for column in columns: assert ( "[" not in column and "]" not in column ), "'[' and ']' cannot be used in column names" def fix_square_braces(records: Iterable[Dict[str, Any]]): for record in records: if any("[" in key or "]" in key for key in record.keys()): yield { key.replace("[", "_").replace("]", "_"): value for key, value in record.items() } else: yield record def _decode_default_value(value): if value.startswith("'") and value.endswith("'"): # It's a string return value[1:-1] if value.isdigit(): # It's an integer return int(value) if value.startswith("X'") and value.endswith("'"): # It's a binary string, stored as hex to_decode = value[2:-1] return binascii.unhexlify(to_decode) # If it is a string containing a floating point number: try: return float(value) except ValueError: pass return value sqlite-utils-3.35.2/sqlite_utils/hookspecs.py000066400000000000000000000006131452131415600213360ustar00rootroot00000000000000from pluggy import HookimplMarker from pluggy import HookspecMarker hookspec = HookspecMarker("sqlite_utils") hookimpl = HookimplMarker("sqlite_utils") @hookspec def register_commands(cli): """Register additional CLI commands, e.g. 'sqlite-utils mycommand ...'""" @hookspec def prepare_connection(conn): """Modify SQLite connection in some way e.g. register custom SQL functions""" sqlite-utils-3.35.2/sqlite_utils/plugins.py000066400000000000000000000014031452131415600210170ustar00rootroot00000000000000import pluggy import sys from . 
import hookspecs pm = pluggy.PluginManager("sqlite_utils") pm.add_hookspecs(hookspecs) if not getattr(sys, "_called_from_test", False): # Only load plugins if not running tests pm.load_setuptools_entrypoints("sqlite_utils") def get_plugins(): plugins = [] plugin_to_distinfo = dict(pm.list_plugin_distinfo()) for plugin in pm.get_plugins(): plugin_info = { "name": plugin.__name__, "hooks": [h.name for h in pm.get_hookcallers(plugin)], } distinfo = plugin_to_distinfo.get(plugin) if distinfo: plugin_info["version"] = distinfo.version plugin_info["name"] = distinfo.project_name plugins.append(plugin_info) return plugins sqlite-utils-3.35.2/sqlite_utils/py.typed000066400000000000000000000000001452131415600204530ustar00rootroot00000000000000sqlite-utils-3.35.2/sqlite_utils/recipes.py000066400000000000000000000032271452131415600207760ustar00rootroot00000000000000from dateutil import parser import json IGNORE = object() SET_NULL = object() def parsedate(value, dayfirst=False, yearfirst=False, errors=None): """ Parse a date and convert it to ISO date format: yyyy-mm-dd \b - dayfirst=True: treat xx as the day in xx/yy/zz - yearfirst=True: treat xx as the year in xx/yy/zz - errors=r.IGNORE to ignore values that cannot be parsed - errors=r.SET_NULL to set values that cannot be parsed to null """ try: return ( parser.parse(value, dayfirst=dayfirst, yearfirst=yearfirst) .date() .isoformat() ) except parser.ParserError: if errors is IGNORE: return value elif errors is SET_NULL: return None else: raise def parsedatetime(value, dayfirst=False, yearfirst=False, errors=None): """ Parse a datetime and convert it to ISO datetime format: yyyy-mm-ddTHH:MM:SS \b - dayfirst=True: treat xx as the day in xx/yy/zz - yearfirst=True: treat xx as the year in xx/yy/zz - errors=r.IGNORE to ignore values that cannot be parsed - errors=r.SET_NULL to set values that cannot be parsed to null """ try: return parser.parse(value, dayfirst=dayfirst, yearfirst=yearfirst).isoformat() except parser.ParserError: if errors is IGNORE: return value elif errors is SET_NULL: return None else: raise def jsonsplit(value, delimiter=",", type=str): """ Convert a string like a,b,c into a JSON array ["a", "b", "c"] """ return json.dumps([type(s.strip()) for s in value.split(delimiter)]) sqlite-utils-3.35.2/sqlite_utils/utils.py000066400000000000000000000413601452131415600205040ustar00rootroot00000000000000import base64 import contextlib import csv import enum import hashlib import io import itertools import json import os import sys from . import recipes from typing import Dict, cast, BinaryIO, Iterable, Optional, Tuple, Type import click try: import pysqlite3 as sqlite3 # noqa: F401 from pysqlite3 import dbapi2 # noqa: F401 OperationalError = dbapi2.OperationalError except ImportError: try: import sqlean as sqlite3 # noqa: F401 from sqlean import dbapi2 # noqa: F401 OperationalError = dbapi2.OperationalError except ImportError: import sqlite3 # noqa: F401 from sqlite3 import dbapi2 # noqa: F401 OperationalError = dbapi2.OperationalError SPATIALITE_PATHS = ( "/usr/lib/x86_64-linux-gnu/mod_spatialite.so", "/usr/lib/aarch64-linux-gnu/mod_spatialite.so", "/usr/local/lib/mod_spatialite.dylib", "/usr/local/lib/mod_spatialite.so", "/opt/homebrew/lib/mod_spatialite.dylib", ) # Mainly so we can restore it if needed in the tests: ORIGINAL_CSV_FIELD_SIZE_LIMIT = csv.field_size_limit() def maximize_csv_field_size_limit(): """ Increase the CSV field size limit to the maximum possible. 
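A usage sketch (the CSV parsing step is only indicated by a placeholder comment):

.. code-block:: python

    import csv
    from sqlite_utils.utils import (
        ORIGINAL_CSV_FIELD_SIZE_LIMIT,
        maximize_csv_field_size_limit,
    )

    maximize_csv_field_size_limit()
    # ... parse the CSV file with very large field values here ...
    # Restore the original limit afterwards if needed:
    csv.field_size_limit(ORIGINAL_CSV_FIELD_SIZE_LIMIT)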
""" # https://stackoverflow.com/a/15063941 field_size_limit = sys.maxsize while True: try: csv.field_size_limit(field_size_limit) break except OverflowError: field_size_limit = int(field_size_limit / 10) def find_spatialite() -> Optional[str]: """ The ``find_spatialite()`` function searches for the `SpatiaLite `__ SQLite extension in some common places. It returns a string path to the location, or ``None`` if SpatiaLite was not found. You can use it in code like this: .. code-block:: python from sqlite_utils import Database from sqlite_utils.utils import find_spatialite db = Database("mydb.db") spatialite = find_spatialite() if spatialite: db.conn.enable_load_extension(True) db.conn.load_extension(spatialite) # or use with db.init_spatialite like this db.init_spatialite(find_spatialite()) """ for path in SPATIALITE_PATHS: if os.path.exists(path): return path return None def suggest_column_types(records): all_column_types = {} for record in records: for key, value in record.items(): all_column_types.setdefault(key, set()).add(type(value)) return types_for_column_types(all_column_types) def types_for_column_types(all_column_types): column_types = {} for key, types in all_column_types.items(): # Ignore null values if at least one other type present: if len(types) > 1: types.discard(None.__class__) if {None.__class__} == types: t = str elif len(types) == 1: t = list(types)[0] # But if it's a subclass of list / tuple / dict, use str # instead as we will be storing it as JSON in the table for superclass in (list, tuple, dict): if issubclass(t, superclass): t = str elif {int, bool}.issuperset(types): t = int elif {int, float, bool}.issuperset(types): t = float elif {bytes, str}.issuperset(types): t = bytes else: t = str column_types[key] = t return column_types def column_affinity(column_type): # Implementation of SQLite affinity rules from # https://www.sqlite.org/datatype3.html#determination_of_column_affinity assert isinstance(column_type, str) column_type = column_type.upper().strip() if column_type == "": return str # We differ from spec, which says it should be BLOB if "INT" in column_type: return int if "CHAR" in column_type or "CLOB" in column_type or "TEXT" in column_type: return str if "BLOB" in column_type: return bytes if "REAL" in column_type or "FLOA" in column_type or "DOUB" in column_type: return float # Default is 'NUMERIC', which we currently also treat as float return float def decode_base64_values(doc): # Looks for '{"$base64": true..., "encoded": ...}' values and decodes them to_fix = [ k for k in doc if isinstance(doc[k], dict) and doc[k].get("$base64") is True and "encoded" in doc[k] ] if not to_fix: return doc return dict(doc, **{k: base64.b64decode(doc[k]["encoded"]) for k in to_fix}) class UpdateWrapper: def __init__(self, wrapped, update): self._wrapped = wrapped self._update = update def __iter__(self): for line in self._wrapped: self._update(len(line)) yield line def read(self, size=-1): data = self._wrapped.read(size) self._update(len(data)) return data @contextlib.contextmanager def file_progress(file, silent=False, **kwargs): if silent: yield file return # file.fileno() throws an exception in our test suite try: fileno = file.fileno() except io.UnsupportedOperation: yield file return if fileno == 0: # 0 means stdin yield file else: file_length = os.path.getsize(file.name) with click.progressbar(length=file_length, **kwargs) as bar: yield UpdateWrapper(file, bar.update) class Format(enum.Enum): CSV = 1 TSV = 2 JSON = 3 NL = 4 class RowsFromFileError(Exception): pass 
class RowsFromFileBadJSON(RowsFromFileError): pass class RowError(Exception): pass def _extra_key_strategy( reader: Iterable[dict], ignore_extras: Optional[bool] = False, extras_key: Optional[str] = None, ) -> Iterable[dict]: # Logic for handling CSV rows with more values than there are headings for row in reader: # DictReader adds a 'None' key with extra row values if None not in row: yield row elif ignore_extras: # ignoring row.pop(none) because of this issue: # https://github.com/simonw/sqlite-utils/issues/440#issuecomment-1155358637 row.pop(None) # type: ignore yield row elif not extras_key: extras = row.pop(None) # type: ignore raise RowError( "Row {} contained these extra values: {}".format(row, extras) ) else: row[extras_key] = row.pop(None) # type: ignore yield row def rows_from_file( fp: BinaryIO, format: Optional[Format] = None, dialect: Optional[Type[csv.Dialect]] = None, encoding: Optional[str] = None, ignore_extras: Optional[bool] = False, extras_key: Optional[str] = None, ) -> Tuple[Iterable[dict], Format]: """ Load a sequence of dictionaries from a file-like object containing one of four different formats. .. code-block:: python from sqlite_utils.utils import rows_from_file import io rows, format = rows_from_file(io.StringIO("id,name\\n1,Cleo"))) print(list(rows), format) # Outputs [{'id': '1', 'name': 'Cleo'}] Format.CSV This defaults to attempting to automatically detect the format of the data, or you can pass in an explicit format using the format= option. Returns a tuple of ``(rows_generator, format_used)`` where ``rows_generator`` can be iterated over to return dictionaries, while ``format_used`` is a value from the ``sqlite_utils.utils.Format`` enum: .. code-block:: python class Format(enum.Enum): CSV = 1 TSV = 2 JSON = 3 NL = 4 If a CSV or TSV file includes rows with more fields than are declared in the header a ``sqlite_utils.utils.RowError`` exception will be raised when you loop over the generator. You can instead ignore the extra data by passing ``ignore_extras=True``. Or pass ``extras_key="rest"`` to put those additional values in a list in a key called ``rest``. 
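A sketch of ``extras_key=`` handling (the CSV content here is made up):

.. code-block:: python

    import io
    from sqlite_utils.utils import rows_from_file, Format

    rows, format_used = rows_from_file(
        io.BytesIO(b"id,name\\n1,Cleo,5"),
        format=Format.CSV,
        extras_key="rest",
    )
    print(list(rows))
    # Outputs [{'id': '1', 'name': 'Cleo', 'rest': ['5']}]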
:param fp: a file-like object containing binary data :param format: the format to use - omit this to detect the format :param dialect: the CSV dialect to use - omit this to detect the dialect :param encoding: the character encoding to use when reading CSV/TSV data :param ignore_extras: ignore any extra fields on rows :param extras_key: put any extra fields in a list with this key """ if ignore_extras and extras_key: raise ValueError("Cannot use ignore_extras= and extras_key= together") if format == Format.JSON: decoded = json.load(fp) if isinstance(decoded, dict): decoded = [decoded] if not isinstance(decoded, list): raise RowsFromFileBadJSON("JSON must be a list or a dictionary") return decoded, Format.JSON elif format == Format.NL: return (json.loads(line) for line in fp if line.strip()), Format.NL elif format == Format.CSV: use_encoding: str = encoding or "utf-8-sig" decoded_fp = io.TextIOWrapper(fp, encoding=use_encoding) if dialect is not None: reader = csv.DictReader(decoded_fp, dialect=dialect) else: reader = csv.DictReader(decoded_fp) return _extra_key_strategy(reader, ignore_extras, extras_key), Format.CSV elif format == Format.TSV: rows = rows_from_file( fp, format=Format.CSV, dialect=csv.excel_tab, encoding=encoding )[0] return ( _extra_key_strategy(rows, ignore_extras, extras_key), Format.TSV, ) elif format is None: # Detect the format, then call this recursively buffered = io.BufferedReader(cast(io.RawIOBase, fp), buffer_size=4096) try: first_bytes = buffered.peek(2048).strip() except AttributeError: # Likely the user passed a TextIO when this needs a BytesIO raise TypeError( "rows_from_file() requires a file-like object that supports peek(), such as io.BytesIO" ) if first_bytes.startswith(b"[") or first_bytes.startswith(b"{"): # TODO: Detect newline-JSON return rows_from_file(buffered, format=Format.JSON) else: dialect = csv.Sniffer().sniff( first_bytes.decode(encoding or "utf-8-sig", "ignore") ) rows, _ = rows_from_file( buffered, format=Format.CSV, dialect=dialect, encoding=encoding ) # Make sure we return the format we detected format = Format.TSV if dialect.delimiter == "\t" else Format.CSV return _extra_key_strategy(rows, ignore_extras, extras_key), format else: raise RowsFromFileError("Bad format") class TypeTracker: """ Wrap an iterator of dictionaries and keep track of which SQLite column types are the most likely fit for each of their keys. Example usage: .. code-block:: python from sqlite_utils.utils import TypeTracker import sqlite_utils db = sqlite_utils.Database(memory=True) tracker = TypeTracker() rows = [{"id": "1", "name": "Cleo", "id": "2", "name": "Cardi"}] db["creatures"].insert_all(tracker.wrap(rows)) print(tracker.types) # Outputs {'id': 'integer', 'name': 'text'} db["creatures"].transform(types=tracker.types) """ def __init__(self): self.trackers = {} def wrap(self, iterator: Iterable[dict]) -> Iterable[dict]: """ Use this to loop through an existing iterator, tracking the column types as part of the iteration. :param iterator: The iterator to wrap """ for row in iterator: for key, value in row.items(): tracker = self.trackers.setdefault(key, ValueTracker()) tracker.evaluate(value) yield row @property def types(self) -> Dict[str, str]: """ A dictionary mapping column names to their detected types. This can be passed to the ``db[table_name].transform(types=tracker.types)`` method. 
""" return {key: tracker.guessed_type for key, tracker in self.trackers.items()} class ValueTracker: def __init__(self): self.couldbe = {key: getattr(self, "test_" + key) for key in self.get_tests()} @classmethod def get_tests(cls): return [ key.split("test_")[-1] for key in cls.__dict__.keys() if key.startswith("test_") ] def test_integer(self, value): try: int(value) return True except (ValueError, TypeError): return False def test_float(self, value): try: float(value) return True except (ValueError, TypeError): return False def __repr__(self) -> str: return self.guessed_type + ": possibilities = " + repr(self.couldbe) @property def guessed_type(self): options = set(self.couldbe.keys()) # Return based on precedence for key in self.get_tests(): if key in options: return key return "text" def evaluate(self, value): if not value or not self.couldbe: return not_these = [] for name, test in self.couldbe.items(): if not test(value): not_these.append(name) for key in not_these: del self.couldbe[key] class NullProgressBar: def __init__(self, *args): self.args = args def __iter__(self): yield from self.args[0] def update(self, value): pass @contextlib.contextmanager def progressbar(*args, **kwargs): silent = kwargs.pop("silent") if silent: yield NullProgressBar(*args) else: with click.progressbar(*args, **kwargs) as bar: yield bar def _compile_code(code, imports, variable="value"): globals = {"r": recipes, "recipes": recipes} # If user defined a convert() function, return that try: exec(code, globals) return globals["convert"] except (AttributeError, SyntaxError, NameError, KeyError, TypeError): pass # Try compiling their code as a function instead body_variants = [code] # If single line and no 'return', try adding the return if "\n" not in code and not code.strip().startswith("return "): body_variants.insert(0, "return {}".format(code)) code_o = None for variant in body_variants: new_code = ["def fn({}):".format(variable)] for line in variant.split("\n"): new_code.append(" {}".format(line)) try: code_o = compile("\n".join(new_code), "", "exec") break except SyntaxError: # Try another variant, e.g. for 'return row["column"] = 1' continue if code_o is None: raise SyntaxError("Could not compile code") for import_ in imports: globals[import_.split(".")[0]] = __import__(import_) exec(code_o, globals) return globals["fn"] def chunks(sequence: Iterable, size: int) -> Iterable[Iterable]: """ Iterate over chunks of the sequence of the given size. :param sequence: Any Python iterator :param size: The size of each chunk """ iterator = iter(sequence) for item in iterator: yield itertools.chain([item], itertools.islice(iterator, size - 1)) def hash_record(record: Dict, keys: Optional[Iterable[str]] = None): """ ``record`` should be a Python dictionary. Returns a sha1 hash of the keys and values in that record. If ``keys=`` is provided, uses just those keys to generate the hash. 
Example usage:: from sqlite_utils.utils import hash_record hashed = hash_record({"name": "Cleo", "twitter": "CleoPaws"}) # Or with the keys= option: hashed = hash_record( {"name": "Cleo", "twitter": "CleoPaws", "age": 7}, keys=("name", "twitter") ) :param record: Record to generate a hash for :param keys: Subset of keys to use for that hash """ to_hash = record if keys is not None: to_hash = {key: record[key] for key in keys} return hashlib.sha1( json.dumps(to_hash, separators=(",", ":"), sort_keys=True, default=repr).encode( "utf8" ) ).hexdigest() def _flatten(d): for key, value in d.items(): if isinstance(value, dict): for key2, value2 in _flatten(value): yield key + "_" + key2, value2 else: yield key, value def flatten(row: dict) -> dict: """ Turn a nested dict e.g. ``{"a": {"b": 1}}`` into a flat dict: ``{"a_b": 1}`` :param row: A Python dictionary, optionally with nested dictionaries """ return dict(_flatten(row)) sqlite-utils-3.35.2/tests/000077500000000000000000000000001452131415600154075ustar00rootroot00000000000000sqlite-utils-3.35.2/tests/__init__.py000066400000000000000000000000001452131415600175060ustar00rootroot00000000000000sqlite-utils-3.35.2/tests/conftest.py000066400000000000000000000015111452131415600176040ustar00rootroot00000000000000from sqlite_utils import Database from sqlite_utils.utils import sqlite3 import pytest CREATE_TABLES = """ create table Gosh (c1 text, c2 text, c3 text); create table Gosh2 (c1 text, c2 text, c3 text); """ def pytest_configure(config): import sys sys._called_from_test = True @pytest.fixture def fresh_db(): return Database(memory=True) @pytest.fixture def existing_db(): database = Database(memory=True) database.executescript( """ CREATE TABLE foo (text TEXT); INSERT INTO foo (text) values ("one"); INSERT INTO foo (text) values ("two"); INSERT INTO foo (text) values ("three"); """ ) return database @pytest.fixture def db_path(tmpdir): path = str(tmpdir / "test.db") db = sqlite3.connect(path) db.executescript(CREATE_TABLES) return path sqlite-utils-3.35.2/tests/ext.c000066400000000000000000000030131452131415600163500ustar00rootroot00000000000000/* ** This file implements a SQLite extension with multiple entrypoints. ** ** The default entrypoint, sqlite3_ext_init, has a single function "a". ** The 1st alternate entrypoint, sqlite3_ext_b_init, has a single function "b". ** The 2nd alternate entrypoint, sqlite3_ext_c_init, has a single function "c". 
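**
** For example, once compiled, an alternate entrypoint can be loaded in the
** sqlite3 shell with ".load ./ext sqlite3_ext_b_init" (a sketch - the path
** to the compiled extension may differ).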
** ** Compiling instructions: ** https://www.sqlite.org/loadext.html#compiling_a_loadable_extension ** */ #include "sqlite3ext.h" SQLITE_EXTENSION_INIT1 // SQL function that returns back the value supplied during sqlite3_create_function() static void func(sqlite3_context *context, int argc, sqlite3_value **argv) { sqlite3_result_text(context, (char *) sqlite3_user_data(context), -1, SQLITE_STATIC); } // The default entrypoint, since it matches the "ext.dylib"/"ext.so" name #ifdef _WIN32 __declspec(dllexport) #endif int sqlite3_ext_init(sqlite3 *db, char **pzErrMsg, const sqlite3_api_routines *pApi) { SQLITE_EXTENSION_INIT2(pApi); return sqlite3_create_function(db, "a", 0, 0, "a", func, 0, 0); } // Alternate entrypoint #1 #ifdef _WIN32 __declspec(dllexport) #endif int sqlite3_ext_b_init(sqlite3 *db, char **pzErrMsg, const sqlite3_api_routines *pApi) { SQLITE_EXTENSION_INIT2(pApi); return sqlite3_create_function(db, "b", 0, 0, "b", func, 0, 0); } // Alternate entrypoint #2 #ifdef _WIN32 __declspec(dllexport) #endif int sqlite3_ext_c_init(sqlite3 *db, char **pzErrMsg, const sqlite3_api_routines *pApi) { SQLITE_EXTENSION_INIT2(pApi); return sqlite3_create_function(db, "c", 0, 0, "c", func, 0, 0); } sqlite-utils-3.35.2/tests/sniff/000077500000000000000000000000001452131415600165145ustar00rootroot00000000000000sqlite-utils-3.35.2/tests/sniff/example1.csv000066400000000000000000000001421452131415600207420ustar00rootroot00000000000000id,species,name,age 1,dog,Cleo,5 2,dog,Pancakes,4 3,cat,Mozie,8 4,spider,"Daisy, the tarantula",6 sqlite-utils-3.35.2/tests/sniff/example2.csv000066400000000000000000000001421452131415600207430ustar00rootroot00000000000000id;species;name;age 1;dog;Cleo;5 2;dog;Pancakes;4 3;cat;Mozie;8 4;spider;"Daisy, the tarantula";6 sqlite-utils-3.35.2/tests/sniff/example3.csv000066400000000000000000000001421452131415600207440ustar00rootroot00000000000000id,species,name,age 1,dog,Cleo,5 2,dog,Pancakes,4 3,cat,Mozie,8 4,spider,'Daisy, the tarantula',6 sqlite-utils-3.35.2/tests/sniff/example4.csv000066400000000000000000000001421452131415600207450ustar00rootroot00000000000000id species name age 1 dog Cleo 5 2 dog Pancakes 4 3 cat Mozie 8 4 spider 'Daisy, the tarantula' 6 sqlite-utils-3.35.2/tests/test_analyze.py000066400000000000000000000034551452131415600204720ustar00rootroot00000000000000import pytest @pytest.fixture def db(fresh_db): fresh_db["one_index"].insert({"id": 1, "name": "Cleo"}, pk="id") fresh_db["one_index"].create_index(["name"]) fresh_db["two_indexes"].insert({"id": 1, "name": "Cleo", "species": "dog"}, pk="id") fresh_db["two_indexes"].create_index(["name"]) fresh_db["two_indexes"].create_index(["species"]) return fresh_db def test_analyze_whole_database(db): assert set(db.table_names()) == {"one_index", "two_indexes"} db.analyze() assert set(db.table_names()).issuperset( {"one_index", "two_indexes", "sqlite_stat1"} ) assert list(db["sqlite_stat1"].rows) == [ {"tbl": "two_indexes", "idx": "idx_two_indexes_species", "stat": "1 1"}, {"tbl": "two_indexes", "idx": "idx_two_indexes_name", "stat": "1 1"}, {"tbl": "one_index", "idx": "idx_one_index_name", "stat": "1 1"}, ] @pytest.mark.parametrize("method", ("db_method_with_name", "table_method")) def test_analyze_one_table(db, method): assert set(db.table_names()).issuperset({"one_index", "two_indexes"}) if method == "db_method_with_name": db.analyze("one_index") elif method == "table_method": db["one_index"].analyze() assert set(db.table_names()).issuperset( {"one_index", "two_indexes", "sqlite_stat1"} ) assert 
list(db["sqlite_stat1"].rows) == [ {"tbl": "one_index", "idx": "idx_one_index_name", "stat": "1 1"} ] def test_analyze_index_by_name(db): assert set(db.table_names()) == {"one_index", "two_indexes"} db.analyze("idx_two_indexes_species") assert set(db.table_names()).issuperset( {"one_index", "two_indexes", "sqlite_stat1"} ) assert list(db["sqlite_stat1"].rows) == [ {"tbl": "two_indexes", "idx": "idx_two_indexes_species", "stat": "1 1"}, ] sqlite-utils-3.35.2/tests/test_analyze_tables.py000066400000000000000000000174551452131415600220310ustar00rootroot00000000000000from sqlite_utils.db import Database, ColumnDetails from sqlite_utils import cli from click.testing import CliRunner import pytest import sqlite3 @pytest.fixture def db_to_analyze(fresh_db): stuff = fresh_db["stuff"] stuff.insert_all( [ {"id": 1, "owner": "Terryterryterry", "size": 5}, {"id": 2, "owner": "Joan", "size": 4}, {"id": 3, "owner": "Kumar", "size": 5}, {"id": 4, "owner": "Anne", "size": 5}, {"id": 5, "owner": "Terryterryterry", "size": 5}, {"id": 6, "owner": "Joan", "size": 4}, {"id": 7, "owner": "Kumar", "size": 5}, {"id": 8, "owner": "Joan", "size": 4}, ], pk="id", ) return fresh_db @pytest.fixture def big_db_to_analyze_path(tmpdir): path = str(tmpdir / "test.db") db = Database(path) categories = { "A": 40, "B": 30, "C": 20, "D": 10, } to_insert = [] for category, count in categories.items(): for _ in range(count): to_insert.append( { "category": category, "all_null": None, } ) db["stuff"].insert_all(to_insert) return path @pytest.mark.parametrize( "column,extra_kwargs,expected", [ ( "id", {}, ColumnDetails( table="stuff", column="id", total_rows=8, num_null=0, num_blank=0, num_distinct=8, most_common=None, least_common=None, ), ), ( "owner", {}, ColumnDetails( table="stuff", column="owner", total_rows=8, num_null=0, num_blank=0, num_distinct=4, most_common=[("Joan", 3), ("Kumar", 2)], least_common=[("Anne", 1), ("Terry...", 2)], ), ), ( "size", {}, ColumnDetails( table="stuff", column="size", total_rows=8, num_null=0, num_blank=0, num_distinct=2, most_common=[(5, 5), (4, 3)], least_common=None, ), ), ( "owner", {"most_common": False}, ColumnDetails( table="stuff", column="owner", total_rows=8, num_null=0, num_blank=0, num_distinct=4, most_common=None, least_common=[("Anne", 1), ("Terry...", 2)], ), ), ( "owner", {"least_common": False}, ColumnDetails( table="stuff", column="owner", total_rows=8, num_null=0, num_blank=0, num_distinct=4, most_common=[("Joan", 3), ("Kumar", 2)], least_common=None, ), ), ], ) def test_analyze_column(db_to_analyze, column, extra_kwargs, expected): assert ( db_to_analyze["stuff"].analyze_column( column, common_limit=2, value_truncate=5, **extra_kwargs ) == expected ) @pytest.fixture def db_to_analyze_path(db_to_analyze, tmpdir): path = str(tmpdir / "test.db") db = sqlite3.connect(path) sql = "\n".join(db_to_analyze.iterdump()) db.executescript(sql) return path def test_analyze_table(db_to_analyze_path): result = CliRunner().invoke(cli.cli, ["analyze-tables", db_to_analyze_path]) assert ( result.output.strip() == ( """ stuff.id: (1/3) Total rows: 8 Null rows: 0 Blank rows: 0 Distinct values: 8 stuff.owner: (2/3) Total rows: 8 Null rows: 0 Blank rows: 0 Distinct values: 4 Most common: 3: Joan 2: Terryterryterry 2: Kumar 1: Anne stuff.size: (3/3) Total rows: 8 Null rows: 0 Blank rows: 0 Distinct values: 2 Most common: 5: 5 3: 4""" ).strip() ) def test_analyze_table_save(db_to_analyze_path): result = CliRunner().invoke( cli.cli, ["analyze-tables", db_to_analyze_path, "--save"] ) assert 
result.exit_code == 0 rows = list(Database(db_to_analyze_path)["_analyze_tables_"].rows) assert rows == [ { "table": "stuff", "column": "id", "total_rows": 8, "num_null": 0, "num_blank": 0, "num_distinct": 8, "most_common": None, "least_common": None, }, { "table": "stuff", "column": "owner", "total_rows": 8, "num_null": 0, "num_blank": 0, "num_distinct": 4, "most_common": '[["Joan", 3], ["Terryterryterry", 2], ["Kumar", 2], ["Anne", 1]]', "least_common": None, }, { "table": "stuff", "column": "size", "total_rows": 8, "num_null": 0, "num_blank": 0, "num_distinct": 2, "most_common": "[[5, 5], [4, 3]]", "least_common": None, }, ] @pytest.mark.parametrize( "no_most,no_least", ( (False, False), (True, False), (False, True), (True, True), ), ) def test_analyze_table_save_no_most_no_least_options( no_most, no_least, big_db_to_analyze_path ): args = [ "analyze-tables", big_db_to_analyze_path, "--save", "--common-limit", "2", "--column", "category", ] if no_most: args.append("--no-most") if no_least: args.append("--no-least") result = CliRunner().invoke(cli.cli, args) assert result.exit_code == 0 rows = list(Database(big_db_to_analyze_path)["_analyze_tables_"].rows) expected = { "table": "stuff", "column": "category", "total_rows": 100, "num_null": 0, "num_blank": 0, "num_distinct": 4, "most_common": None, "least_common": None, } if not no_most: expected["most_common"] = '[["A", 40], ["B", 30]]' if not no_least: expected["least_common"] = '[["D", 10], ["C", 20]]' assert rows == [expected] def test_analyze_table_column_all_nulls(big_db_to_analyze_path): result = CliRunner().invoke( cli.cli, ["analyze-tables", big_db_to_analyze_path, "stuff", "--column", "all_null"], ) assert result.exit_code == 0 assert result.output == ( "stuff.all_null: (1/1)\n\n Total rows: 100\n" " Null rows: 100\n" " Blank rows: 0\n" "\n" " Distinct values: 0\n\n" ) @pytest.mark.parametrize( "args,expected_error", ( (["-c", "bad_column"], "These columns were not found: bad_column\n"), (["one", "-c", "age"], "These columns were not found: age\n"), (["two", "-c", "age"], None), ( ["one", "-c", "age", "--column", "bad"], "These columns were not found: age, bad\n", ), ), ) def test_analyze_table_validate_columns(tmpdir, args, expected_error): path = str(tmpdir / "test_validate_columns.db") db = Database(path) db["one"].insert( { "id": 1, "name": "one", } ) db["two"].insert( { "id": 1, "age": 5, } ) result = CliRunner().invoke( cli.cli, ["analyze-tables", path] + args, catch_exceptions=False, ) assert result.exit_code == (1 if expected_error else 0) if expected_error: assert expected_error in result.output sqlite-utils-3.35.2/tests/test_attach.py000066400000000000000000000007551452131415600202730ustar00rootroot00000000000000from sqlite_utils import Database def test_attach(tmpdir): foo_path = str(tmpdir / "foo.db") bar_path = str(tmpdir / "bar.db") db = Database(foo_path) with db.conn: db["foo"].insert({"id": 1, "text": "foo"}) db2 = Database(bar_path) with db2.conn: db2["bar"].insert({"id": 1, "text": "bar"}) db.attach("bar", bar_path) assert db.execute( "select * from foo union all select * from bar.bar" ).fetchall() == [(1, "foo"), (1, "bar")] sqlite-utils-3.35.2/tests/test_cli.py000066400000000000000000002221731452131415600175760ustar00rootroot00000000000000from sqlite_utils import cli, Database from sqlite_utils.db import Index, ForeignKey from click.testing import CliRunner from pathlib import Path import subprocess import sys from unittest import mock import json import os import pytest import textwrap def 
write_json(file_path, data): with open(file_path, "w") as fp: json.dump(data, fp) def _supports_pragma_function_list(): db = Database(memory=True) try: db.execute("select * from pragma_function_list()") except Exception: return False return True def _has_compiled_ext(): for ext in ["dylib", "so", "dll"]: path = Path(__file__).parent / f"ext.{ext}" if path.is_file(): return True return False COMPILED_EXTENSION_PATH = str(Path(__file__).parent / "ext") @pytest.mark.parametrize( "options", ( ["-h"], ["--help"], ["insert", "-h"], ["insert", "--help"], ), ) def test_help(options): result = CliRunner().invoke(cli.cli, options) assert result.exit_code == 0 assert result.output.startswith("Usage: ") assert "-h, --help" in result.output def test_tables(db_path): result = CliRunner().invoke(cli.cli, ["tables", db_path], catch_exceptions=False) assert '[{"table": "Gosh"},\n {"table": "Gosh2"}]' == result.output.strip() def test_views(db_path): Database(db_path).create_view("hello", "select sqlite_version()") result = CliRunner().invoke(cli.cli, ["views", db_path, "--table", "--schema"]) assert ( "view schema\n" "------ --------------------------------------------\n" "hello CREATE VIEW hello AS select sqlite_version()" ) == result.output.strip() def test_tables_fts4(db_path): Database(db_path)["Gosh"].enable_fts(["c2"], fts_version="FTS4") result = CliRunner().invoke(cli.cli, ["tables", "--fts4", db_path]) assert '[{"table": "Gosh_fts"}]' == result.output.strip() def test_tables_fts5(db_path): Database(db_path)["Gosh"].enable_fts(["c2"], fts_version="FTS5") result = CliRunner().invoke(cli.cli, ["tables", "--fts5", db_path]) assert '[{"table": "Gosh_fts"}]' == result.output.strip() def test_tables_counts_and_columns(db_path): db = Database(db_path) with db.conn: db["lots"].insert_all([{"id": i, "age": i + 1} for i in range(30)]) result = CliRunner().invoke(cli.cli, ["tables", "--counts", "--columns", db_path]) assert ( '[{"table": "Gosh", "count": 0, "columns": ["c1", "c2", "c3"]},\n' ' {"table": "Gosh2", "count": 0, "columns": ["c1", "c2", "c3"]},\n' ' {"table": "lots", "count": 30, "columns": ["id", "age"]}]' ) == result.output.strip() @pytest.mark.parametrize( "format,expected", [ ( "--csv", ( "table,count,columns\n" 'Gosh,0,"c1\n' "c2\n" 'c3"\n' 'Gosh2,0,"c1\n' "c2\n" 'c3"\n' 'lots,30,"id\n' 'age"' ), ), ( "--tsv", "table\tcount\tcolumns\nGosh\t0\t['c1', 'c2', 'c3']\nGosh2\t0\t['c1', 'c2', 'c3']\nlots\t30\t['id', 'age']", ), ], ) def test_tables_counts_and_columns_csv(db_path, format, expected): db = Database(db_path) with db.conn: db["lots"].insert_all([{"id": i, "age": i + 1} for i in range(30)]) result = CliRunner().invoke( cli.cli, ["tables", "--counts", "--columns", format, db_path] ) assert result.output.strip().replace("\r", "") == expected def test_tables_schema(db_path): db = Database(db_path) with db.conn: db["lots"].insert_all([{"id": i, "age": i + 1} for i in range(30)]) result = CliRunner().invoke(cli.cli, ["tables", "--schema", db_path]) assert ( '[{"table": "Gosh", "schema": "CREATE TABLE Gosh (c1 text, c2 text, c3 text)"},\n' ' {"table": "Gosh2", "schema": "CREATE TABLE Gosh2 (c1 text, c2 text, c3 text)"},\n' ' {"table": "lots", "schema": "CREATE TABLE [lots] (\\n [id] INTEGER,\\n [age] INTEGER\\n)"}]' ) == result.output.strip() @pytest.mark.parametrize( "options,expected", [ ( ["--fmt", "simple"], ( "c1 c2 c3\n" "----- ----- ----------\n" "verb0 noun0 adjective0\n" "verb1 noun1 adjective1\n" "verb2 noun2 adjective2\n" "verb3 noun3 adjective3" ), ), ( ["-t"], ( "c1 c2 c3\n" "----- 
----- ----------\n" "verb0 noun0 adjective0\n" "verb1 noun1 adjective1\n" "verb2 noun2 adjective2\n" "verb3 noun3 adjective3" ), ), ( ["--fmt", "rst"], ( "===== ===== ==========\n" "c1 c2 c3\n" "===== ===== ==========\n" "verb0 noun0 adjective0\n" "verb1 noun1 adjective1\n" "verb2 noun2 adjective2\n" "verb3 noun3 adjective3\n" "===== ===== ==========" ), ), ], ) def test_output_table(db_path, options, expected): db = Database(db_path) with db.conn: db["rows"].insert_all( [ { "c1": "verb{}".format(i), "c2": "noun{}".format(i), "c3": "adjective{}".format(i), } for i in range(4) ] ) result = CliRunner().invoke(cli.cli, ["rows", db_path, "rows"] + options) assert result.exit_code == 0 assert expected == result.output.strip() def test_create_index(db_path): db = Database(db_path) assert [] == db["Gosh"].indexes result = CliRunner().invoke(cli.cli, ["create-index", db_path, "Gosh", "c1"]) assert result.exit_code == 0 assert [ Index( seq=0, name="idx_Gosh_c1", unique=0, origin="c", partial=0, columns=["c1"] ) ] == db["Gosh"].indexes # Try with a custom name result = CliRunner().invoke( cli.cli, ["create-index", db_path, "Gosh", "c2", "--name", "blah"] ) assert result.exit_code == 0 assert [ Index(seq=0, name="blah", unique=0, origin="c", partial=0, columns=["c2"]), Index( seq=1, name="idx_Gosh_c1", unique=0, origin="c", partial=0, columns=["c1"] ), ] == db["Gosh"].indexes # Try a two-column unique index create_index_unique_args = [ "create-index", db_path, "Gosh2", "c1", "c2", "--unique", ] result = CliRunner().invoke(cli.cli, create_index_unique_args) assert result.exit_code == 0 assert [ Index( seq=0, name="idx_Gosh2_c1_c2", unique=1, origin="c", partial=0, columns=["c1", "c2"], ) ] == db["Gosh2"].indexes # Trying to create the same index should fail assert CliRunner().invoke(cli.cli, create_index_unique_args).exit_code != 0 # ... 
unless we use --if-not-exists or --ignore for option in ("--if-not-exists", "--ignore"): assert ( CliRunner().invoke(cli.cli, create_index_unique_args + [option]).exit_code == 0 ) def test_create_index_analyze(db_path): db = Database(db_path) assert "sqlite_stat1" not in db.table_names() assert [] == db["Gosh"].indexes result = CliRunner().invoke( cli.cli, ["create-index", db_path, "Gosh", "c1", "--analyze"] ) assert result.exit_code == 0 assert "sqlite_stat1" in db.table_names() def test_create_index_desc(db_path): db = Database(db_path) assert [] == db["Gosh"].indexes result = CliRunner().invoke(cli.cli, ["create-index", db_path, "Gosh", "--", "-c1"]) assert result.exit_code == 0 assert ( db.execute("select sql from sqlite_master where type='index'").fetchone()[0] == "CREATE INDEX [idx_Gosh_c1]\n ON [Gosh] ([c1] desc)" ) @pytest.mark.parametrize( "col_name,col_type,expected_schema", ( ("text", "TEXT", "CREATE TABLE [dogs] (\n [name] TEXT\n, [text] TEXT)"), ( "integer", "INTEGER", "CREATE TABLE [dogs] (\n [name] TEXT\n, [integer] INTEGER)", ), ("float", "FLOAT", "CREATE TABLE [dogs] (\n [name] TEXT\n, [float] FLOAT)"), ("blob", "blob", "CREATE TABLE [dogs] (\n [name] TEXT\n, [blob] BLOB)"), ("default", None, "CREATE TABLE [dogs] (\n [name] TEXT\n, [default] TEXT)"), ), ) def test_add_column(db_path, col_name, col_type, expected_schema): db = Database(db_path) db.create_table("dogs", {"name": str}) assert db["dogs"].schema == "CREATE TABLE [dogs] (\n [name] TEXT\n)" args = ["add-column", db_path, "dogs", col_name] if col_type is not None: args.append(col_type) assert CliRunner().invoke(cli.cli, args).exit_code == 0 assert db["dogs"].schema == expected_schema @pytest.mark.parametrize("ignore", (True, False)) def test_add_column_ignore(db_path, ignore): db = Database(db_path) db.create_table("dogs", {"name": str}) args = ["add-column", db_path, "dogs", "name"] + (["--ignore"] if ignore else []) result = CliRunner().invoke(cli.cli, args) if ignore: assert result.exit_code == 0 else: assert result.exit_code == 1 assert result.output == "Error: duplicate column name: name\n" def test_add_column_not_null_default(db_path): db = Database(db_path) db.create_table("dogs", {"name": str}) assert db["dogs"].schema == "CREATE TABLE [dogs] (\n [name] TEXT\n)" args = [ "add-column", db_path, "dogs", "nickname", "--not-null-default", "dogs'dawg", ] assert CliRunner().invoke(cli.cli, args).exit_code == 0 assert db["dogs"].schema == ( "CREATE TABLE [dogs] (\n" " [name] TEXT\n" ", [nickname] TEXT NOT NULL DEFAULT 'dogs''dawg')" ) @pytest.mark.parametrize( "args,assert_message", ( ( ["books", "author_id", "authors", "id"], "Explicit other_table and other_column", ), (["books", "author_id", "authors"], "Explicit other_table, guess other_column"), (["books", "author_id"], "Automatically guess other_table and other_column"), ), ) def test_add_foreign_key(db_path, args, assert_message): db = Database(db_path) db["authors"].insert_all( [{"id": 1, "name": "Sally"}, {"id": 2, "name": "Asheesh"}], pk="id" ) db["books"].insert_all( [ {"title": "Hedgehogs of the world", "author_id": 1}, {"title": "How to train your wolf", "author_id": 2}, ] ) assert ( CliRunner().invoke(cli.cli, ["add-foreign-key", db_path] + args).exit_code == 0 ), assert_message assert [ ForeignKey( table="books", column="author_id", other_table="authors", other_column="id" ) ] == db["books"].foreign_keys # Error if we try to add it twice: result = CliRunner().invoke( cli.cli, ["add-foreign-key", db_path, "books", "author_id", "authors", "id"] ) 
assert result.exit_code != 0 assert ( "Error: Foreign key already exists for author_id => authors.id" == result.output.strip() ) # No error if we add it twice with --ignore result = CliRunner().invoke( cli.cli, ["add-foreign-key", db_path, "books", "author_id", "authors", "id", "--ignore"], ) assert result.exit_code == 0 # Error if we try against an invalid column result = CliRunner().invoke( cli.cli, ["add-foreign-key", db_path, "books", "author_id", "authors", "bad"] ) assert result.exit_code != 0 assert "Error: No such column: authors.bad" == result.output.strip() def test_add_column_foreign_key(db_path): db = Database(db_path) db["authors"].insert({"id": 1, "name": "Sally"}, pk="id") db["books"].insert({"title": "Hedgehogs of the world"}) # Add an author_id foreign key column to the books table result = CliRunner().invoke( cli.cli, ["add-column", db_path, "books", "author_id", "--fk", "authors"] ) assert result.exit_code == 0, result.output assert db["books"].schema == ( 'CREATE TABLE "books" (\n' " [title] TEXT,\n" " [author_id] INTEGER REFERENCES [authors]([id])\n" ")" ) # Try it again with a custom --fk-col result = CliRunner().invoke( cli.cli, [ "add-column", db_path, "books", "author_name_ref", "--fk", "authors", "--fk-col", "name", ], ) assert result.exit_code == 0, result.output assert db["books"].schema == ( 'CREATE TABLE "books" (\n' " [title] TEXT,\n" " [author_id] INTEGER REFERENCES [authors]([id]),\n" " [author_name_ref] TEXT REFERENCES [authors]([name])\n" ")" ) # Throw an error if the --fk table does not exist result = CliRunner().invoke( cli.cli, ["add-column", db_path, "books", "author_id", "--fk", "bobcats"] ) assert result.exit_code != 0 assert "table 'bobcats' does not exist" in str(result.exception) def test_suggest_alter_if_column_missing(db_path): db = Database(db_path) db["authors"].insert({"id": 1, "name": "Sally"}, pk="id") result = CliRunner().invoke( cli.cli, ["insert", db_path, "authors", "-"], input='{"id": 2, "name": "Barry", "age": 43}', ) assert result.exit_code != 0 assert result.output.strip() == ( "Error: table authors has no column named age\n\n" "Try using --alter to add additional columns" ) def test_index_foreign_keys(db_path): test_add_column_foreign_key(db_path) db = Database(db_path) assert [] == db["books"].indexes result = CliRunner().invoke(cli.cli, ["index-foreign-keys", db_path]) assert result.exit_code == 0 assert [["author_id"], ["author_name_ref"]] == [ i.columns for i in db["books"].indexes ] def test_enable_fts(db_path): db = Database(db_path) assert db["Gosh"].detect_fts() is None result = CliRunner().invoke( cli.cli, ["enable-fts", db_path, "Gosh", "c1", "--fts4"] ) assert result.exit_code == 0 assert "Gosh_fts" == db["Gosh"].detect_fts() # Table names with restricted chars are handled correctly. # colons and dots are restricted characters for table names. 
db["http://example.com"].create({"c1": str, "c2": str, "c3": str}) assert db["http://example.com"].detect_fts() is None result = CliRunner().invoke( cli.cli, [ "enable-fts", db_path, "http://example.com", "c1", "--fts4", "--tokenize", "porter", ], ) assert result.exit_code == 0 assert "http://example.com_fts" == db["http://example.com"].detect_fts() # Check tokenize was set to porter assert ( "CREATE VIRTUAL TABLE [http://example.com_fts] USING FTS4 (\n" " [c1],\n" " tokenize='porter',\n" " content=[http://example.com]" "\n)" ) == db["http://example.com_fts"].schema db["http://example.com"].drop() def test_enable_fts_replace(db_path): db = Database(db_path) assert db["Gosh"].detect_fts() is None result = CliRunner().invoke( cli.cli, ["enable-fts", db_path, "Gosh", "c1", "--fts4"] ) assert result.exit_code == 0 assert "Gosh_fts" == db["Gosh"].detect_fts() assert db["Gosh_fts"].columns_dict == {"c1": str} # This should throw an error result2 = CliRunner().invoke( cli.cli, ["enable-fts", db_path, "Gosh", "c1", "--fts4"] ) assert result2.exit_code == 1 assert result2.output == "Error: table [Gosh_fts] already exists\n" # This should work result3 = CliRunner().invoke( cli.cli, ["enable-fts", db_path, "Gosh", "c2", "--fts4", "--replace"] ) assert result3.exit_code == 0 assert db["Gosh_fts"].columns_dict == {"c2": str} def test_enable_fts_with_triggers(db_path): Database(db_path)["Gosh"].insert_all([{"c1": "baz"}]) exit_code = ( CliRunner() .invoke( cli.cli, ["enable-fts", db_path, "Gosh", "c1", "--fts4", "--create-triggers"], ) .exit_code ) assert exit_code == 0 def search(q): return ( Database(db_path) .execute("select c1 from Gosh_fts where c1 match ?", [q]) .fetchall() ) assert [("baz",)] == search("baz") Database(db_path)["Gosh"].insert_all([{"c1": "martha"}]) assert [("martha",)] == search("martha") def test_populate_fts(db_path): Database(db_path)["Gosh"].insert_all([{"c1": "baz"}]) exit_code = ( CliRunner() .invoke(cli.cli, ["enable-fts", db_path, "Gosh", "c1", "--fts4"]) .exit_code ) assert exit_code == 0 def search(q): return ( Database(db_path) .execute("select c1 from Gosh_fts where c1 match ?", [q]) .fetchall() ) assert [("baz",)] == search("baz") Database(db_path)["Gosh"].insert_all([{"c1": "martha"}]) assert [] == search("martha") exit_code = ( CliRunner().invoke(cli.cli, ["populate-fts", db_path, "Gosh", "c1"]).exit_code ) assert exit_code == 0 assert [("martha",)] == search("martha") def test_disable_fts(db_path): db = Database(db_path) assert {"Gosh", "Gosh2"} == set(db.table_names()) db["Gosh"].enable_fts(["c1"], create_triggers=True) assert { "Gosh_fts", "Gosh_fts_idx", "Gosh_fts_data", "Gosh2", "Gosh_fts_config", "Gosh", "Gosh_fts_docsize", } == set(db.table_names()) exit_code = CliRunner().invoke(cli.cli, ["disable-fts", db_path, "Gosh"]).exit_code assert exit_code == 0 assert {"Gosh", "Gosh2"} == set(db.table_names()) def test_vacuum(db_path): result = CliRunner().invoke(cli.cli, ["vacuum", db_path]) assert result.exit_code == 0 def test_dump(db_path): result = CliRunner().invoke(cli.cli, ["dump", db_path]) assert result.exit_code == 0 assert result.output.startswith("BEGIN TRANSACTION;") assert result.output.strip().endswith("COMMIT;") @pytest.mark.parametrize("tables", ([], ["Gosh"], ["Gosh2"])) def test_optimize(db_path, tables): db = Database(db_path) with db.conn: for table in ("Gosh", "Gosh2"): db[table].insert_all( [ { "c1": "verb{}".format(i), "c2": "noun{}".format(i), "c3": "adjective{}".format(i), } for i in range(10000) ] ) db["Gosh"].enable_fts(["c1", "c2", 
"c3"], fts_version="FTS4") db["Gosh2"].enable_fts(["c1", "c2", "c3"], fts_version="FTS5") size_before_optimize = os.stat(db_path).st_size result = CliRunner().invoke(cli.cli, ["optimize", db_path] + tables) assert result.exit_code == 0 size_after_optimize = os.stat(db_path).st_size # Weirdest thing: tests started failing because size after # ended up larger than size before in some cases. I think # it's OK to tolerate that happening, though it's very strange. assert size_after_optimize <= (size_before_optimize + 10000) # Soundness check that --no-vacuum doesn't throw errors: result = CliRunner().invoke(cli.cli, ["optimize", "--no-vacuum", db_path]) assert result.exit_code == 0 def test_rebuild_fts_fixes_docsize_error(db_path): db = Database(db_path, recursive_triggers=False) records = [ { "c1": "verb{}".format(i), "c2": "noun{}".format(i), "c3": "adjective{}".format(i), } for i in range(10000) ] with db.conn: db["fts5_table"].insert_all(records, pk="c1") db["fts5_table"].enable_fts( ["c1", "c2", "c3"], fts_version="FTS5", create_triggers=True ) # Search should work assert list(db["fts5_table"].search("verb1")) # Replicate docsize error from this issue for FTS5 # https://github.com/simonw/sqlite-utils/issues/149 assert db["fts5_table_fts_docsize"].count == 10000 db["fts5_table"].insert_all(records, replace=True) assert db["fts5_table"].count == 10000 assert db["fts5_table_fts_docsize"].count == 20000 # Running rebuild-fts should fix this result = CliRunner().invoke(cli.cli, ["rebuild-fts", db_path, "fts5_table"]) assert result.exit_code == 0 assert db["fts5_table_fts_docsize"].count == 10000 @pytest.mark.parametrize( "format,expected", [ ("--csv", "id,name,age\n1,Cleo,4\n2,Pancakes,2\n"), ("--tsv", "id\tname\tage\n1\tCleo\t4\n2\tPancakes\t2\n"), ], ) def test_query_csv(db_path, format, expected): db = Database(db_path) with db.conn: db["dogs"].insert_all( [ {"id": 1, "age": 4, "name": "Cleo"}, {"id": 2, "age": 2, "name": "Pancakes"}, ] ) result = CliRunner().invoke( cli.cli, [db_path, "select id, name, age from dogs", format] ) assert result.exit_code == 0 assert result.output.replace("\r", "") == expected # Test the no-headers option: result = CliRunner().invoke( cli.cli, [db_path, "select id, name, age from dogs", "--no-headers", format] ) expected_rest = "\n".join(expected.split("\n")[1:]).strip() assert result.output.strip().replace("\r", "") == expected_rest _all_query = "select id, name, age from dogs" _one_query = "select id, name, age from dogs where id = 1" @pytest.mark.parametrize( "sql,args,expected", [ ( _all_query, [], '[{"id": 1, "name": "Cleo", "age": 4},\n {"id": 2, "name": "Pancakes", "age": 2}]', ), ( _all_query, ["--nl"], '{"id": 1, "name": "Cleo", "age": 4}\n{"id": 2, "name": "Pancakes", "age": 2}', ), (_all_query, ["--arrays"], '[[1, "Cleo", 4],\n [2, "Pancakes", 2]]'), (_all_query, ["--arrays", "--nl"], '[1, "Cleo", 4]\n[2, "Pancakes", 2]'), (_one_query, [], '[{"id": 1, "name": "Cleo", "age": 4}]'), (_one_query, ["--nl"], '{"id": 1, "name": "Cleo", "age": 4}'), (_one_query, ["--arrays"], '[[1, "Cleo", 4]]'), (_one_query, ["--arrays", "--nl"], '[1, "Cleo", 4]'), ( "select id, dog(age) from dogs", ["--functions", "def dog(i):\n return i * 7"], '[{"id": 1, "dog(age)": 28},\n {"id": 2, "dog(age)": 14}]', ), ], ) def test_query_json(db_path, sql, args, expected): db = Database(db_path) with db.conn: db["dogs"].insert_all( [ {"id": 1, "age": 4, "name": "Cleo"}, {"id": 2, "age": 2, "name": "Pancakes"}, ] ) result = CliRunner().invoke(cli.cli, [db_path, sql] + args) assert 
expected == result.output.strip() def test_query_json_empty(db_path): result = CliRunner().invoke( cli.cli, [db_path, "select * from sqlite_master where 0"], ) assert result.output.strip() == "[]" def test_query_invalid_function(db_path): result = CliRunner().invoke( cli.cli, [db_path, "select bad()", "--functions", "def invalid_python"] ) assert result.exit_code == 1 assert result.output.startswith("Error: Error in functions definition:") TEST_FUNCTIONS = """ def zero(): return 0 def one(a): return a def _two(a, b): return a + b def two(a, b): return _two(a, b) """ def test_query_complex_function(db_path): result = CliRunner().invoke( cli.cli, [ db_path, "select zero(), one(1), two(1, 2)", "--functions", TEST_FUNCTIONS, ], ) assert result.exit_code == 0 assert json.loads(result.output.strip()) == [ {"zero()": 0, "one(1)": 1, "two(1, 2)": 3} ] @pytest.mark.skipif( not _supports_pragma_function_list(), reason="Needs SQLite version that supports pragma_function_list()", ) def test_hidden_functions_are_hidden(db_path): result = CliRunner().invoke( cli.cli, [ db_path, "select name from pragma_function_list()", "--functions", TEST_FUNCTIONS, ], ) assert result.exit_code == 0 functions = {r["name"] for r in json.loads(result.output.strip())} assert "zero" in functions assert "one" in functions assert "two" in functions assert "_two" not in functions LOREM_IPSUM_COMPRESSED = ( b"x\x9c\xed\xd1\xcdq\x03!\x0c\x05\xe0\xbb\xabP\x01\x1eW\x91\xdc|M\x01\n\xc8\x8e" b"f\xf83H\x1e\x97\x1f\x91M\x8e\xe9\xe0\xdd\x96\x05\x84\xf4\xbek\x9fRI\xc7\xf2J" b"\xb9\x97>i\xa9\x11W\xb13\xa5\xde\x96$\x13\xf3I\x9cu\xe8J\xda\xee$EcsI\x8e\x0b" b"$\xea\xab\xf6L&u\xc4emI\xb3foFnT\xf83\xca\x93\xd8QZ\xa8\xf2\xbd1q\xd1\x87\xf3" b"\x85>\x8c\xa4i\x8d\xdaTu\x7f\xf0\x81\x0f|\xe0\x03" b"\x1f\xf8\xc0\x07>\xf0\x81\x0f|\xe0\x03\x1f\xf8\xc0\x07>\xf0\x81\x0f|\xe0\x03" b"\x1f\xf8\xc0\x07>\xf0\x81\x0f|\xe0\x03\x1f\xf8\xc0\x07>\xf0\x81\x0f|\xe0\x03" b"\x1f\xf8\xc0\x07>\xf0\x81\x0f|\xe0\x03\x1f\xf8\xc0\x07>\xf0\x81\x0f|\xe0\x03" b"\x1f\xf8\xc0\x07>\xf0\x81\x0f|\xe0\xfb\x8f\xef\x1b\x9b\x06\x83}" ) def test_query_json_binary(db_path): db = Database(db_path) with db.conn: db["files"].insert( { "name": "lorem.txt", "sz": 16984, "data": LOREM_IPSUM_COMPRESSED, }, pk="name", ) result = CliRunner().invoke(cli.cli, [db_path, "select name, sz, data from files"]) assert result.exit_code == 0, str(result) assert json.loads(result.output.strip()) == [ { "name": "lorem.txt", "sz": 16984, "data": { "$base64": True, "encoded": ( ( "eJzt0c1xAyEMBeC7q1ABHleR3HxNAQrIjmb4M0gelx+RTY7p4N2WBYT0vmufUknH" "8kq5lz5pqRFXsTOl3pYkE/NJnHXoStruJEVjc0mOCyTqq/ZMJnXEZW1Js2ZvRm5U+" "DPKk9hRWqjyvTFx0YfzhT6MpGmN2lR1fzxjyfVMD9dFrS+bnkleMpMam/ZGXgrX1I" "/K+5Au3S/9lNQRh0k4Gq/RUz8GiKfsQm+7JLsJ6fTo5JhVG00ZU76kZZkxePx49uI" "jnpNoJyYlWUsoaSl/CcVATje/Kxu13RANnrHweaH3V5Jh4jvGyKCnxJLiXPKhmW3f" "iCnG7Jql7RR3UvFo8jJ4z039dtOkTFmWzL1be9lt8A5II471m6vXy+l0BR/4wAc+8" "IEPfOADH/jABz7wgQ984AMf+MAHPvCBD3zgAx/4wAc+8IEPfOADH/jABz7wgQ984A" "Mf+MAHPvCBD3zgAx/4wAc+8IEPfOADH/jABz7wgQ984PuP7xubBoN9" ) ), }, } ] @pytest.mark.parametrize( "sql,params,expected", [ ("select 1 + 1 as out", {"p": "2"}, 2), ("select 1 + :p as out", {"p": "2"}, 3), ( "select :hello as out", {"hello": """This"has'many'quote"s"""}, """This"has'many'quote"s""", ), ], ) def test_query_params(db_path, sql, params, expected): extra_args = [] for key, value in params.items(): extra_args.extend(["-p", key, value]) result = CliRunner().invoke(cli.cli, [db_path, sql] + extra_args) assert result.exit_code == 0, str(result) assert 
json.loads(result.output.strip()) == [{"out": expected}] def test_query_json_with_json_cols(db_path): db = Database(db_path) with db.conn: db["dogs"].insert( { "id": 1, "name": "Cleo", "friends": [{"name": "Pancakes"}, {"name": "Bailey"}], } ) result = CliRunner().invoke( cli.cli, [db_path, "select id, name, friends from dogs"] ) assert ( r""" [{"id": 1, "name": "Cleo", "friends": "[{\"name\": \"Pancakes\"}, {\"name\": \"Bailey\"}]"}] """.strip() == result.output.strip() ) # With --json-cols: result = CliRunner().invoke( cli.cli, [db_path, "select id, name, friends from dogs", "--json-cols"] ) expected = r""" [{"id": 1, "name": "Cleo", "friends": [{"name": "Pancakes"}, {"name": "Bailey"}]}] """.strip() assert expected == result.output.strip() # Test rows command too result_rows = CliRunner().invoke(cli.cli, ["rows", db_path, "dogs", "--json-cols"]) assert expected == result_rows.output.strip() @pytest.mark.parametrize( "content,is_binary", [(b"\x00\x0Fbinary", True), ("this is text", False), (1, False), (1.5, False)], ) def test_query_raw(db_path, content, is_binary): Database(db_path)["files"].insert({"content": content}) result = CliRunner().invoke( cli.cli, [db_path, "select content from files", "--raw"] ) if is_binary: assert result.stdout_bytes == content else: assert result.output == str(content) @pytest.mark.parametrize( "content,is_binary", [(b"\x00\x0Fbinary", True), ("this is text", False), (1, False), (1.5, False)], ) def test_query_raw_lines(db_path, content, is_binary): Database(db_path)["files"].insert_all({"content": content} for _ in range(3)) result = CliRunner().invoke( cli.cli, [db_path, "select content from files", "--raw-lines"] ) if is_binary: assert result.stdout_bytes == b"\n".join(content for _ in range(3)) + b"\n" else: assert result.output == "\n".join(str(content) for _ in range(3)) + "\n" def test_query_memory_does_not_create_file(tmpdir): owd = os.getcwd() try: os.chdir(tmpdir) # This should create a foo.db file CliRunner().invoke(cli.cli, ["foo.db", "select sqlite_version()"]) # This should NOT create a file result = CliRunner().invoke(cli.cli, [":memory:", "select sqlite_version()"]) assert ["sqlite_version()"] == list(json.loads(result.output)[0].keys()) finally: os.chdir(owd) assert ["foo.db"] == os.listdir(tmpdir) @pytest.mark.parametrize( "args,expected", [ ( [], '[{"id": 1, "name": "Cleo", "age": 4},\n {"id": 2, "name": "Pancakes", "age": 2}]', ), ( ["--nl"], '{"id": 1, "name": "Cleo", "age": 4}\n{"id": 2, "name": "Pancakes", "age": 2}', ), (["--arrays"], '[[1, "Cleo", 4],\n [2, "Pancakes", 2]]'), (["--arrays", "--nl"], '[1, "Cleo", 4]\n[2, "Pancakes", 2]'), ( ["--nl", "-c", "age", "-c", "name"], '{"age": 4, "name": "Cleo"}\n{"age": 2, "name": "Pancakes"}', ), # --limit and --offset ( ["-c", "name", "--limit", "1"], '[{"name": "Cleo"}]', ), ( ["-c", "name", "--limit", "1", "--offset", "1"], '[{"name": "Pancakes"}]', ), # --where ( ["-c", "name", "--where", "id = 1"], '[{"name": "Cleo"}]', ), ( ["-c", "name", "--where", "id = :id", "-p", "id", "1"], '[{"name": "Cleo"}]', ), ( ["-c", "name", "--where", "id = :id", "--param", "id", "1"], '[{"name": "Cleo"}]', ), # --order ( ["-c", "id", "--order", "id desc", "--limit", "1"], '[{"id": 2}]', ), ( ["-c", "id", "--order", "id", "--limit", "1"], '[{"id": 1}]', ), ], ) def test_rows(db_path, args, expected): db = Database(db_path) with db.conn: db["dogs"].insert_all( [ {"id": 1, "age": 4, "name": "Cleo"}, {"id": 2, "age": 2, "name": "Pancakes"}, ], column_order=("id", "name", "age"), ) result = 
CliRunner().invoke(cli.cli, ["rows", db_path, "dogs"] + args) assert expected == result.output.strip() def test_upsert(db_path, tmpdir): json_path = str(tmpdir / "dogs.json") db = Database(db_path) insert_dogs = [ {"id": 1, "name": "Cleo", "age": 4}, {"id": 2, "name": "Nixie", "age": 4}, ] write_json(json_path, insert_dogs) result = CliRunner().invoke( cli.cli, ["insert", db_path, "dogs", json_path, "--pk", "id"], catch_exceptions=False, ) assert result.exit_code == 0, result.output assert 2 == db["dogs"].count # Now run the upsert to update just their ages upsert_dogs = [ {"id": 1, "age": 5}, {"id": 2, "age": 5}, ] write_json(json_path, upsert_dogs) result = CliRunner().invoke( cli.cli, ["upsert", db_path, "dogs", json_path, "--pk", "id"], catch_exceptions=False, ) assert result.exit_code == 0, result.output assert list(db.query("select * from dogs order by id")) == [ {"id": 1, "name": "Cleo", "age": 5}, {"id": 2, "name": "Nixie", "age": 5}, ] def test_upsert_pk_required(db_path, tmpdir): json_path = str(tmpdir / "dogs.json") insert_dogs = [ {"id": 1, "name": "Cleo", "age": 4}, {"id": 2, "name": "Nixie", "age": 4}, ] write_json(json_path, insert_dogs) result = CliRunner().invoke( cli.cli, ["upsert", db_path, "dogs", json_path], catch_exceptions=False, ) assert result.exit_code == 2 assert "Error: Missing option '--pk'" in result.output def test_upsert_analyze(db_path, tmpdir): db = Database(db_path) db["rows"].insert({"id": 1, "foo": "x", "n": 3}, pk="id") db["rows"].create_index(["n"]) assert "sqlite_stat1" not in db.table_names() result = CliRunner().invoke( cli.cli, ["upsert", db_path, "rows", "-", "--nl", "--analyze", "--pk", "id"], input='{"id": 2, "foo": "bar", "n": 1}', ) assert result.exit_code == 0, result.output assert "sqlite_stat1" in db.table_names() def test_upsert_flatten(tmpdir): db_path = str(tmpdir / "flat.db") db = Database(db_path) db["upsert_me"].insert({"id": 1, "name": "Example"}, pk="id") result = CliRunner().invoke( cli.cli, ["upsert", db_path, "upsert_me", "-", "--flatten", "--pk", "id", "--alter"], input=json.dumps({"id": 1, "nested": {"two": 2}}), ) assert result.exit_code == 0 assert list(db.query("select * from upsert_me")) == [ {"id": 1, "name": "Example", "nested_two": 2} ] def test_upsert_alter(db_path, tmpdir): json_path = str(tmpdir / "dogs.json") db = Database(db_path) insert_dogs = [{"id": 1, "name": "Cleo"}] write_json(json_path, insert_dogs) result = CliRunner().invoke( cli.cli, ["insert", db_path, "dogs", json_path, "--pk", "id"] ) assert result.exit_code == 0, result.output # Should fail with error code if no --alter upsert_dogs = [{"id": 1, "age": 5}] write_json(json_path, upsert_dogs) result = CliRunner().invoke( cli.cli, ["upsert", db_path, "dogs", json_path, "--pk", "id"] ) assert result.exit_code == 1 assert ( "Error: no such column: age\n\n" "sql = UPDATE [dogs] SET [age] = ? 
WHERE [id] = ?\n" "parameters = [5, 1]" ) == result.output.strip() # Should succeed with --alter result = CliRunner().invoke( cli.cli, ["upsert", db_path, "dogs", json_path, "--pk", "id", "--alter"] ) assert result.exit_code == 0 assert list(db.query("select * from dogs order by id")) == [ {"id": 1, "name": "Cleo", "age": 5}, ] @pytest.mark.parametrize( "args,schema", [ # No primary key ( [ "name", "text", "age", "integer", ], ("CREATE TABLE [t] (\n [name] TEXT,\n [age] INTEGER\n)"), ), # All types: ( [ "id", "integer", "name", "text", "age", "integer", "weight", "float", "thumbnail", "blob", "--pk", "id", ], ( "CREATE TABLE [t] (\n" " [id] INTEGER PRIMARY KEY,\n" " [name] TEXT,\n" " [age] INTEGER,\n" " [weight] FLOAT,\n" " [thumbnail] BLOB\n" ")" ), ), # Not null: ( ["name", "text", "--not-null", "name"], ("CREATE TABLE [t] (\n" " [name] TEXT NOT NULL\n" ")"), ), # Default: ( ["age", "integer", "--default", "age", "3"], ("CREATE TABLE [t] (\n" " [age] INTEGER DEFAULT '3'\n" ")"), ), ], ) def test_create_table(args, schema): runner = CliRunner() with runner.isolated_filesystem(): result = runner.invoke( cli.cli, [ "create-table", "test.db", "t", ] + args, catch_exceptions=False, ) assert result.exit_code == 0 db = Database("test.db") assert schema == db["t"].schema def test_create_table_foreign_key(): runner = CliRunner() creates = ( ["authors", "id", "integer", "name", "text", "--pk", "id"], [ "books", "id", "integer", "title", "text", "author_id", "integer", "--pk", "id", "--fk", "author_id", "authors", "id", ], ) with runner.isolated_filesystem(): for args in creates: result = runner.invoke( cli.cli, ["create-table", "books.db"] + args, catch_exceptions=False ) assert result.exit_code == 0 db = Database("books.db") assert ( "CREATE TABLE [authors] (\n" " [id] INTEGER PRIMARY KEY,\n" " [name] TEXT\n" ")" ) == db["authors"].schema assert ( "CREATE TABLE [books] (\n" " [id] INTEGER PRIMARY KEY,\n" " [title] TEXT,\n" " [author_id] INTEGER REFERENCES [authors]([id])\n" ")" ) == db["books"].schema def test_create_table_error_if_table_exists(): runner = CliRunner() with runner.isolated_filesystem(): db = Database("test.db") db["dogs"].insert({"name": "Cleo"}) result = runner.invoke( cli.cli, ["create-table", "test.db", "dogs", "id", "integer"] ) assert result.exit_code == 1 assert ( 'Error: Table "dogs" already exists. Use --replace to delete and replace it.' 
== result.output.strip() ) def test_create_table_ignore(): runner = CliRunner() with runner.isolated_filesystem(): db = Database("test.db") db["dogs"].insert({"name": "Cleo"}) result = runner.invoke( cli.cli, ["create-table", "test.db", "dogs", "id", "integer", "--ignore"] ) assert result.exit_code == 0 assert "CREATE TABLE [dogs] (\n [name] TEXT\n)" == db["dogs"].schema def test_create_table_replace(): runner = CliRunner() with runner.isolated_filesystem(): db = Database("test.db") db["dogs"].insert({"name": "Cleo"}) result = runner.invoke( cli.cli, ["create-table", "test.db", "dogs", "id", "integer", "--replace"] ) assert result.exit_code == 0 assert "CREATE TABLE [dogs] (\n [id] INTEGER\n)" == db["dogs"].schema def test_create_view(): runner = CliRunner() with runner.isolated_filesystem(): db = Database("test.db") result = runner.invoke( cli.cli, ["create-view", "test.db", "version", "select sqlite_version()"] ) assert result.exit_code == 0 assert "CREATE VIEW version AS select sqlite_version()" == db["version"].schema def test_create_view_error_if_view_exists(): runner = CliRunner() with runner.isolated_filesystem(): db = Database("test.db") db.create_view("version", "select sqlite_version() + 1") result = runner.invoke( cli.cli, ["create-view", "test.db", "version", "select sqlite_version()"] ) assert result.exit_code == 1 assert ( 'Error: View "version" already exists. Use --replace to delete and replace it.' == result.output.strip() ) def test_create_view_ignore(): runner = CliRunner() with runner.isolated_filesystem(): db = Database("test.db") db.create_view("version", "select sqlite_version() + 1") result = runner.invoke( cli.cli, [ "create-view", "test.db", "version", "select sqlite_version()", "--ignore", ], ) assert result.exit_code == 0 assert ( "CREATE VIEW version AS select sqlite_version() + 1" == db["version"].schema ) def test_create_view_replace(): runner = CliRunner() with runner.isolated_filesystem(): db = Database("test.db") db.create_view("version", "select sqlite_version() + 1") result = runner.invoke( cli.cli, [ "create-view", "test.db", "version", "select sqlite_version()", "--replace", ], ) assert result.exit_code == 0 assert "CREATE VIEW version AS select sqlite_version()" == db["version"].schema def test_drop_table(): runner = CliRunner() with runner.isolated_filesystem(): db = Database("test.db") db["t"].create({"pk": int}, pk="pk") assert "t" in db.table_names() result = runner.invoke( cli.cli, [ "drop-table", "test.db", "t", ], ) assert result.exit_code == 0 assert "t" not in db.table_names() def test_drop_table_error(): runner = CliRunner() with runner.isolated_filesystem(): db = Database("test.db") db["t"].create({"pk": int}, pk="pk") result = runner.invoke( cli.cli, [ "drop-table", "test.db", "t2", ], ) assert result.exit_code == 1 assert 'Error: Table "t2" does not exist' == result.output.strip() # Using --ignore suppresses that error result = runner.invoke( cli.cli, ["drop-table", "test.db", "t2", "--ignore"], ) assert result.exit_code == 0 def test_drop_view(): runner = CliRunner() with runner.isolated_filesystem(): db = Database("test.db") db.create_view("hello", "select 1") assert "hello" in db.view_names() result = runner.invoke( cli.cli, [ "drop-view", "test.db", "hello", ], ) assert result.exit_code == 0 assert "hello" not in db.view_names() def test_drop_view_error(): runner = CliRunner() with runner.isolated_filesystem(): db = Database("test.db") db["t"].create({"pk": int}, pk="pk") result = runner.invoke( cli.cli, [ "drop-view", "test.db", 
"t2", ], ) assert result.exit_code == 1 assert 'Error: View "t2" does not exist' == result.output.strip() # Using --ignore suppresses that error result = runner.invoke( cli.cli, ["drop-view", "test.db", "t2", "--ignore"], ) assert result.exit_code == 0 def test_enable_wal(): runner = CliRunner() dbs = ["test.db", "test2.db"] with runner.isolated_filesystem(): for dbname in dbs: db = Database(dbname) db["t"].create({"pk": int}, pk="pk") assert db.journal_mode == "delete" result = runner.invoke(cli.cli, ["enable-wal"] + dbs, catch_exceptions=False) assert result.exit_code == 0 for dbname in dbs: db = Database(dbname) assert db.journal_mode == "wal" def test_disable_wal(): runner = CliRunner() dbs = ["test.db", "test2.db"] with runner.isolated_filesystem(): for dbname in dbs: db = Database(dbname) db["t"].create({"pk": int}, pk="pk") db.enable_wal() assert db.journal_mode == "wal" result = runner.invoke(cli.cli, ["disable-wal"] + dbs) assert result.exit_code == 0 for dbname in dbs: db = Database(dbname) assert db.journal_mode == "delete" @pytest.mark.parametrize( "args,expected", [ ( [], '[{"rows_affected": 1}]', ), (["-t"], "rows_affected\n---------------\n 1"), ], ) def test_query_update(db_path, args, expected): db = Database(db_path) with db.conn: db["dogs"].insert_all( [ {"id": 1, "age": 4, "name": "Cleo"}, ] ) result = CliRunner().invoke( cli.cli, [db_path, "update dogs set age = 5 where name = 'Cleo'"] + args ) assert expected == result.output.strip() assert list(db.query("select * from dogs")) == [ {"id": 1, "age": 5, "name": "Cleo"}, ] def test_add_foreign_keys(db_path): db = Database(db_path) db["countries"].insert({"id": 7, "name": "Panama"}, pk="id") db["authors"].insert({"id": 3, "name": "Matilda", "country_id": 7}, pk="id") db["books"].insert({"id": 2, "title": "Wolf anatomy", "author_id": 3}, pk="id") assert db["authors"].foreign_keys == [] assert db["books"].foreign_keys == [] result = CliRunner().invoke( cli.cli, [ "add-foreign-keys", db_path, "authors", "country_id", "countries", "id", "books", "author_id", "authors", "id", ], ) assert result.exit_code == 0 assert db["authors"].foreign_keys == [ ForeignKey( table="authors", column="country_id", other_table="countries", other_column="id", ) ] assert db["books"].foreign_keys == [ ForeignKey( table="books", column="author_id", other_table="authors", other_column="id" ) ] @pytest.mark.parametrize( "args,expected_schema", [ ( [], ( 'CREATE TABLE "dogs" (\n' " [id] INTEGER PRIMARY KEY,\n" " [age] INTEGER NOT NULL DEFAULT '1',\n" " [name] TEXT\n" ")" ), ), ( ["--type", "age", "text"], ( 'CREATE TABLE "dogs" (\n' " [id] INTEGER PRIMARY KEY,\n" " [age] TEXT NOT NULL DEFAULT '1',\n" " [name] TEXT\n" ")" ), ), ( ["--drop", "age"], ( 'CREATE TABLE "dogs" (\n' " [id] INTEGER PRIMARY KEY,\n" " [name] TEXT\n" ")" ), ), ( ["--rename", "age", "age2", "--rename", "id", "pk"], ( 'CREATE TABLE "dogs" (\n' " [pk] INTEGER PRIMARY KEY,\n" " [age2] INTEGER NOT NULL DEFAULT '1',\n" " [name] TEXT\n" ")" ), ), ( ["--not-null", "name"], ( 'CREATE TABLE "dogs" (\n' " [id] INTEGER PRIMARY KEY,\n" " [age] INTEGER NOT NULL DEFAULT '1',\n" " [name] TEXT NOT NULL\n" ")" ), ), ( ["--not-null-false", "age"], ( 'CREATE TABLE "dogs" (\n' " [id] INTEGER PRIMARY KEY,\n" " [age] INTEGER DEFAULT '1',\n" " [name] TEXT\n" ")" ), ), ( ["--pk", "name"], ( 'CREATE TABLE "dogs" (\n' " [id] INTEGER,\n" " [age] INTEGER NOT NULL DEFAULT '1',\n" " [name] TEXT PRIMARY KEY\n" ")" ), ), ( ["--pk-none"], ( 'CREATE TABLE "dogs" (\n' " [id] INTEGER,\n" " [age] INTEGER NOT NULL 
DEFAULT '1',\n" " [name] TEXT\n" ")" ), ), ( ["--default", "name", "Turnip"], ( 'CREATE TABLE "dogs" (\n' " [id] INTEGER PRIMARY KEY,\n" " [age] INTEGER NOT NULL DEFAULT '1',\n" " [name] TEXT DEFAULT 'Turnip'\n" ")" ), ), ( ["--default-none", "age"], ( 'CREATE TABLE "dogs" (\n' " [id] INTEGER PRIMARY KEY,\n" " [age] INTEGER NOT NULL,\n" " [name] TEXT\n" ")" ), ), ( ["-o", "name", "--column-order", "age", "-o", "id"], ( 'CREATE TABLE "dogs" (\n' " [name] TEXT,\n" " [age] INTEGER NOT NULL DEFAULT '1',\n" " [id] INTEGER PRIMARY KEY\n" ")" ), ), ], ) def test_transform(db_path, args, expected_schema): db = Database(db_path) with db.conn: db["dogs"].insert( {"id": 1, "age": 4, "name": "Cleo"}, not_null={"age"}, defaults={"age": 1}, pk="id", ) result = CliRunner().invoke(cli.cli, ["transform", db_path, "dogs"] + args) print(result.output) assert result.exit_code == 0 schema = db["dogs"].schema assert schema == expected_schema @pytest.mark.parametrize( "extra_args,expected_schema", ( ( ["--drop-foreign-key", "country"], ( 'CREATE TABLE "places" (\n' " [id] INTEGER PRIMARY KEY,\n" " [name] TEXT,\n" " [country] INTEGER,\n" " [city] INTEGER REFERENCES [city]([id]),\n" " [continent] INTEGER\n" ")" ), ), ( ["--drop-foreign-key", "country", "--drop-foreign-key", "city"], ( 'CREATE TABLE "places" (\n' " [id] INTEGER PRIMARY KEY,\n" " [name] TEXT,\n" " [country] INTEGER,\n" " [city] INTEGER,\n" " [continent] INTEGER\n" ")" ), ), ( ["--add-foreign-key", "continent", "continent", "id"], ( 'CREATE TABLE "places" (\n' " [id] INTEGER PRIMARY KEY,\n" " [name] TEXT,\n" " [country] INTEGER REFERENCES [country]([id]),\n" " [city] INTEGER REFERENCES [city]([id]),\n" " [continent] INTEGER REFERENCES [continent]([id])\n" ")" ), ), ), ) def test_transform_add_or_drop_foreign_key(db_path, extra_args, expected_schema): db = Database(db_path) with db.conn: # Create table with three foreign keys so we can drop two of them db["continent"].insert({"id": 1, "name": "Europe"}, pk="id") db["country"].insert({"id": 1, "name": "France"}, pk="id") db["city"].insert({"id": 24, "name": "Paris"}, pk="id") db["places"].insert( { "id": 32, "name": "Caveau de la Huchette", "country": 1, "city": 24, "continent": 1, }, foreign_keys=("country", "city"), pk="id", ) result = CliRunner().invoke( cli.cli, [ "transform", db_path, "places", ] + extra_args, ) assert result.exit_code == 0 schema = db["places"].schema assert schema == expected_schema _common_other_schema = ( "CREATE TABLE [species] (\n [id] INTEGER PRIMARY KEY,\n [species] TEXT\n)" ) @pytest.mark.parametrize( "args,expected_table_schema,expected_other_schema", [ ( [], ( 'CREATE TABLE "trees" (\n' " [id] INTEGER PRIMARY KEY,\n" " [address] TEXT,\n" " [species_id] INTEGER REFERENCES [species]([id])\n" ")" ), _common_other_schema, ), ( ["--table", "custom_table"], ( 'CREATE TABLE "trees" (\n' " [id] INTEGER PRIMARY KEY,\n" " [address] TEXT,\n" " [custom_table_id] INTEGER REFERENCES [custom_table]([id])\n" ")" ), "CREATE TABLE [custom_table] (\n [id] INTEGER PRIMARY KEY,\n [species] TEXT\n)", ), ( ["--fk-column", "custom_fk"], ( 'CREATE TABLE "trees" (\n' " [id] INTEGER PRIMARY KEY,\n" " [address] TEXT,\n" " [custom_fk] INTEGER REFERENCES [species]([id])\n" ")" ), _common_other_schema, ), ( ["--rename", "name", "name2"], 'CREATE TABLE "trees" (\n' " [id] INTEGER PRIMARY KEY,\n" " [address] TEXT,\n" " [species_id] INTEGER REFERENCES [species]([id])\n" ")", "CREATE TABLE [species] (\n [id] INTEGER PRIMARY KEY,\n [species] TEXT\n)", ), ], ) def test_extract(db_path, args, 
expected_table_schema, expected_other_schema): db = Database(db_path) with db.conn: db["trees"].insert( {"id": 1, "address": "4 Park Ave", "species": "Palm"}, pk="id", ) result = CliRunner().invoke( cli.cli, ["extract", db_path, "trees", "species"] + args ) print(result.output) assert result.exit_code == 0 schema = db["trees"].schema assert schema == expected_table_schema other_schema = [t for t in db.tables if t.name not in ("trees", "Gosh", "Gosh2")][ 0 ].schema assert other_schema == expected_other_schema def test_insert_encoding(tmpdir): db_path = str(tmpdir / "test.db") latin1_csv = ( b"date,name,latitude,longitude\n" b"2020-01-01,Barra da Lagoa,-27.574,-48.422\n" b"2020-03-04,S\xe3o Paulo,-23.561,-46.645\n" b"2020-04-05,Salta,-24.793:-65.408" ) assert latin1_csv.decode("latin-1").split("\n")[2].split(",")[1] == "São Paulo" csv_path = str(tmpdir / "test.csv") with open(csv_path, "wb") as fp: fp.write(latin1_csv) # First attempt should error: bad_result = CliRunner().invoke( cli.cli, ["insert", db_path, "places", csv_path, "--csv"], catch_exceptions=False, ) assert bad_result.exit_code == 1 assert ( "The input you provided uses a character encoding other than utf-8" in bad_result.output ) # Using --encoding=latin-1 should work good_result = CliRunner().invoke( cli.cli, ["insert", db_path, "places", csv_path, "--encoding", "latin-1", "--csv"], catch_exceptions=False, ) assert good_result.exit_code == 0 db = Database(db_path) assert list(db["places"].rows) == [ { "date": "2020-01-01", "name": "Barra da Lagoa", "latitude": "-27.574", "longitude": "-48.422", }, { "date": "2020-03-04", "name": "São Paulo", "latitude": "-23.561", "longitude": "-46.645", }, { "date": "2020-04-05", "name": "Salta", "latitude": "-24.793:-65.408", "longitude": None, }, ] @pytest.mark.parametrize("fts", ["FTS4", "FTS5"]) @pytest.mark.parametrize( "extra_arg,expected", [ ( None, '[{"rowid": 2, "id": 2, "title": "Title the second"}]\n', ), ("--csv", "rowid,id,title\n2,2,Title the second\n"), ], ) def test_search(tmpdir, fts, extra_arg, expected): db_path = str(tmpdir / "test.db") db = Database(db_path) db["articles"].insert_all( [ {"id": 1, "title": "Title the first"}, {"id": 2, "title": "Title the second"}, {"id": 3, "title": "Title the third"}, ], pk="id", ) db["articles"].enable_fts(["title"], fts_version=fts) result = CliRunner().invoke( cli.cli, ["search", db_path, "articles", "second"] + ([extra_arg] if extra_arg else []), catch_exceptions=False, ) assert result.exit_code == 0 assert result.output.replace("\r", "") == expected def test_search_quote(tmpdir): db_path = str(tmpdir / "test.db") db = Database(db_path) db["creatures"].insert({"name": "dog."}).enable_fts(["name"]) # Without --quote should return an error error_result = CliRunner().invoke(cli.cli, ["search", db_path, "creatures", 'dog"']) assert error_result.exit_code == 1 assert error_result.output == ( "Error: unterminated string\n\n" "Try running this again with the --quote option\n" ) # With --quote it should work result = CliRunner().invoke( cli.cli, ["search", db_path, "creatures", 'dog"', "--quote"] ) assert result.exit_code == 0 assert result.output.strip() == '[{"rowid": 1, "name": "dog."}]' def test_indexes(tmpdir): db_path = str(tmpdir / "test.db") db = Database(db_path) db.conn.executescript( """ create table Gosh (c1 text, c2 text, c3 text); create index Gosh_idx on Gosh(c2, c3 desc); """ ) result = CliRunner().invoke( cli.cli, ["indexes", str(db_path)], catch_exceptions=False, ) assert result.exit_code == 0 assert 
json.loads(result.output) == [ { "table": "Gosh", "index_name": "Gosh_idx", "seqno": 0, "cid": 1, "name": "c2", "desc": 0, "coll": "BINARY", "key": 1, }, { "table": "Gosh", "index_name": "Gosh_idx", "seqno": 1, "cid": 2, "name": "c3", "desc": 1, "coll": "BINARY", "key": 1, }, ] result2 = CliRunner().invoke( cli.cli, ["indexes", str(db_path), "--aux"], catch_exceptions=False, ) assert result2.exit_code == 0 assert json.loads(result2.output) == [ { "table": "Gosh", "index_name": "Gosh_idx", "seqno": 0, "cid": 1, "name": "c2", "desc": 0, "coll": "BINARY", "key": 1, }, { "table": "Gosh", "index_name": "Gosh_idx", "seqno": 1, "cid": 2, "name": "c3", "desc": 1, "coll": "BINARY", "key": 1, }, { "table": "Gosh", "index_name": "Gosh_idx", "seqno": 2, "cid": -1, "name": None, "desc": 0, "coll": "BINARY", "key": 0, }, ] _TRIGGERS_EXPECTED = ( '[{"name": "blah", "table": "articles", "sql": "CREATE TRIGGER blah ' 'AFTER INSERT ON articles\\nBEGIN\\n UPDATE counter SET count = count + 1;\\nEND"}]\n' ) @pytest.mark.parametrize( "extra_args,expected", [ ([], _TRIGGERS_EXPECTED), (["articles"], _TRIGGERS_EXPECTED), (["counter"], "[]\n"), ], ) def test_triggers(tmpdir, extra_args, expected): db_path = str(tmpdir / "test.db") db = Database(db_path) db["articles"].insert( {"id": 1, "title": "Title the first"}, pk="id", ) db["counter"].insert({"count": 1}) db.conn.execute( textwrap.dedent( """ CREATE TRIGGER blah AFTER INSERT ON articles BEGIN UPDATE counter SET count = count + 1; END """ ) ) args = ["triggers", db_path] if extra_args: args.extend(extra_args) result = CliRunner().invoke( cli.cli, args, catch_exceptions=False, ) assert result.exit_code == 0 assert result.output == expected @pytest.mark.parametrize( "options,expected", ( ( [], ( "CREATE TABLE [dogs] (\n" " [id] INTEGER,\n" " [name] TEXT\n" ");\n" "CREATE TABLE [chickens] (\n" " [id] INTEGER,\n" " [name] TEXT,\n" " [breed] TEXT\n" ");\n" "CREATE INDEX [idx_chickens_breed]\n" " ON [chickens] ([breed]);\n" ), ), ( ["dogs"], ("CREATE TABLE [dogs] (\n" " [id] INTEGER,\n" " [name] TEXT\n" ")\n"), ), ( ["chickens", "dogs"], ( "CREATE TABLE [chickens] (\n" " [id] INTEGER,\n" " [name] TEXT,\n" " [breed] TEXT\n" ")\n" "CREATE TABLE [dogs] (\n" " [id] INTEGER,\n" " [name] TEXT\n" ")\n" ), ), ), ) def test_schema(tmpdir, options, expected): db_path = str(tmpdir / "test.db") db = Database(db_path) db["dogs"].create({"id": int, "name": str}) db["chickens"].create({"id": int, "name": str, "breed": str}) db["chickens"].create_index(["breed"]) result = CliRunner().invoke( cli.cli, ["schema", db_path] + options, catch_exceptions=False, ) assert result.exit_code == 0 assert result.output == expected def test_long_csv_column_value(tmpdir): db_path = str(tmpdir / "test.db") csv_path = str(tmpdir / "test.csv") csv_file = open(csv_path, "w") long_string = "a" * 131073 csv_file.write("id,text\n") csv_file.write("1,{}\n".format(long_string)) csv_file.close() result = CliRunner().invoke( cli.cli, ["insert", db_path, "bigtable", csv_path, "--csv"], catch_exceptions=False, ) assert result.exit_code == 0 db = Database(db_path) rows = list(db["bigtable"].rows) assert len(rows) == 1 assert rows[0]["text"] == long_string @pytest.mark.parametrize( "args,tsv", ( (["--csv", "--no-headers"], False), (["--no-headers"], False), (["--tsv", "--no-headers"], True), ), ) def test_import_no_headers(tmpdir, args, tsv): db_path = str(tmpdir / "test.db") csv_path = str(tmpdir / "test.csv") csv_file = open(csv_path, "w") sep = "\t" if tsv else "," 
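    # Equivalent shell usage for one of the parametrized variants (sketch):
    #   sqlite-utils insert test.db creatures test.csv --csv --no-headers
    # Two data rows and no header line are written below; the insert should
    # fall back to generated column names untitled_1, untitled_2, untitled_3,
    # which the assertions that follow verify.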
csv_file.write("Cleo{sep}Dog{sep}5\n".format(sep=sep)) csv_file.write("Tracy{sep}Spider{sep}7\n".format(sep=sep)) csv_file.close() result = CliRunner().invoke( cli.cli, ["insert", db_path, "creatures", csv_path] + args, catch_exceptions=False, ) assert result.exit_code == 0, result.output db = Database(db_path) schema = db["creatures"].schema assert schema == ( "CREATE TABLE [creatures] (\n" " [untitled_1] TEXT,\n" " [untitled_2] TEXT,\n" " [untitled_3] TEXT\n" ")" ) rows = list(db["creatures"].rows) assert rows == [ {"untitled_1": "Cleo", "untitled_2": "Dog", "untitled_3": "5"}, {"untitled_1": "Tracy", "untitled_2": "Spider", "untitled_3": "7"}, ] def test_attach(tmpdir): foo_path = str(tmpdir / "foo.db") bar_path = str(tmpdir / "bar.db") db = Database(foo_path) with db.conn: db["foo"].insert({"id": 1, "text": "foo"}) db2 = Database(bar_path) with db2.conn: db2["bar"].insert({"id": 1, "text": "bar"}) db.attach("bar", bar_path) sql = "select * from foo union all select * from bar.bar" result = CliRunner().invoke( cli.cli, [foo_path, "--attach", "bar", bar_path, sql], catch_exceptions=False, ) assert json.loads(result.output) == [ {"id": 1, "text": "foo"}, {"id": 1, "text": "bar"}, ] def test_csv_insert_bom(tmpdir): db_path = str(tmpdir / "test.db") bom_csv_path = str(tmpdir / "bom.csv") with open(bom_csv_path, "wb") as fp: fp.write(b"\xef\xbb\xbfname,age\nCleo,5") result = CliRunner().invoke( cli.cli, ["insert", db_path, "broken", bom_csv_path, "--encoding", "utf-8", "--csv"], catch_exceptions=False, ) assert result.exit_code == 0 result2 = CliRunner().invoke( cli.cli, ["insert", db_path, "fixed", bom_csv_path, "--csv"], catch_exceptions=False, ) assert result2.exit_code == 0 db = Database(db_path) tables = db.execute("select name, sql from sqlite_master").fetchall() assert tables == [ ("broken", "CREATE TABLE [broken] (\n [\ufeffname] TEXT,\n [age] TEXT\n)"), ("fixed", "CREATE TABLE [fixed] (\n [name] TEXT,\n [age] TEXT\n)"), ] @pytest.mark.parametrize("option_or_env_var", (None, "-d", "--detect-types")) def test_insert_detect_types(tmpdir, option_or_env_var): db_path = str(tmpdir / "test.db") data = "name,age,weight\nCleo,6,45.5\nDori,1,3.5" extra = [] if option_or_env_var: extra = [option_or_env_var] def _test(): result = CliRunner().invoke( cli.cli, ["insert", db_path, "creatures", "-", "--csv"] + extra, catch_exceptions=False, input=data, ) assert result.exit_code == 0 db = Database(db_path) assert list(db["creatures"].rows) == [ {"name": "Cleo", "age": 6, "weight": 45.5}, {"name": "Dori", "age": 1, "weight": 3.5}, ] if option_or_env_var is None: # Use environment variable instead of option with mock.patch.dict(os.environ, {"SQLITE_UTILS_DETECT_TYPES": "1"}): _test() else: _test() @pytest.mark.parametrize("option", ("-d", "--detect-types")) def test_upsert_detect_types(tmpdir, option): db_path = str(tmpdir / "test.db") data = "id,name,age,weight\n1,Cleo,6,45.5\n2,Dori,1,3.5" result = CliRunner().invoke( cli.cli, ["upsert", db_path, "creatures", "-", "--csv", "--pk", "id"] + [option], catch_exceptions=False, input=data, ) assert result.exit_code == 0 db = Database(db_path) assert list(db["creatures"].rows) == [ {"id": 1, "name": "Cleo", "age": 6, "weight": 45.5}, {"id": 2, "name": "Dori", "age": 1, "weight": 3.5}, ] def test_integer_overflow_error(tmpdir): db_path = str(tmpdir / "test.db") result = CliRunner().invoke( cli.cli, ["insert", db_path, "items", "-"], input=json.dumps({"bignumber": 34223049823094832094802398430298048240}), ) assert result.exit_code == 1 assert result.output 
== ( "Error: Python int too large to convert to SQLite INTEGER\n\n" "sql = INSERT INTO [items] ([bignumber]) VALUES (?);\n" "parameters = [34223049823094832094802398430298048240]\n" ) def test_python_dash_m(): "Tool can be run using python -m sqlite_utils" result = subprocess.run( [sys.executable, "-m", "sqlite_utils", "--help"], stdout=subprocess.PIPE ) assert result.returncode == 0 assert b"Commands for interacting with a SQLite database" in result.stdout @pytest.mark.parametrize("enable_wal", (False, True)) def test_create_database(tmpdir, enable_wal): db_path = tmpdir / "test.db" assert not db_path.exists() args = ["create-database", str(db_path)] if enable_wal: args.append("--enable-wal") result = CliRunner().invoke(cli.cli, args) assert result.exit_code == 0, result.output assert db_path.exists() assert db_path.read_binary()[:16] == b"SQLite format 3\x00" db = Database(str(db_path)) if enable_wal: assert db.journal_mode == "wal" else: assert db.journal_mode == "delete" @pytest.mark.parametrize( "options,expected", ( ( [], [ {"tbl": "two_indexes", "idx": "idx_two_indexes_species", "stat": "1 1"}, {"tbl": "two_indexes", "idx": "idx_two_indexes_name", "stat": "1 1"}, {"tbl": "one_index", "idx": "idx_one_index_name", "stat": "1 1"}, ], ), ( ["one_index"], [ {"tbl": "one_index", "idx": "idx_one_index_name", "stat": "1 1"}, ], ), ( ["idx_two_indexes_name"], [ {"tbl": "two_indexes", "idx": "idx_two_indexes_name", "stat": "1 1"}, ], ), ), ) def test_analyze(tmpdir, options, expected): db_path = str(tmpdir / "test.db") db = Database(db_path) db["one_index"].insert({"id": 1, "name": "Cleo"}, pk="id") db["one_index"].create_index(["name"]) db["two_indexes"].insert({"id": 1, "name": "Cleo", "species": "dog"}, pk="id") db["two_indexes"].create_index(["name"]) db["two_indexes"].create_index(["species"]) result = CliRunner().invoke(cli.cli, ["analyze", db_path] + options) assert result.exit_code == 0 assert list(db["sqlite_stat1"].rows) == expected def test_rename_table(tmpdir): db_path = str(tmpdir / "test.db") db = Database(db_path) db["one"].insert({"id": 1, "name": "Cleo"}, pk="id") # First try a non-existent table result_error = CliRunner().invoke( cli.cli, ["rename-table", db_path, "missing", "two"], catch_exceptions=False, ) assert result_error.exit_code == 1 assert result_error.output == ( 'Error: Table "missing" could not be renamed. 
' "no such table: missing\n" ) # And check --ignore works result_error2 = CliRunner().invoke( cli.cli, ["rename-table", db_path, "missing", "two", "--ignore"], catch_exceptions=False, ) assert result_error2.exit_code == 0 previous_columns = db["one"].columns_dict # Now try for a table that exists result = CliRunner().invoke( cli.cli, ["rename-table", db_path, "one", "two"], catch_exceptions=False, ) assert result.exit_code == 0 assert db["two"].columns_dict == previous_columns def test_duplicate_table(tmpdir): db_path = str(tmpdir / "test.db") db = Database(db_path) db["one"].insert({"id": 1, "name": "Cleo"}, pk="id") # First try a non-existent table result_error = CliRunner().invoke( cli.cli, ["duplicate", db_path, "missing", "two"], catch_exceptions=False, ) assert result_error.exit_code == 1 assert result_error.output == 'Error: Table "missing" does not exist\n' # And check --ignore works result_error2 = CliRunner().invoke( cli.cli, ["duplicate", db_path, "missing", "two", "--ignore"], catch_exceptions=False, ) assert result_error2.exit_code == 0 # Now try for a table that exists result = CliRunner().invoke( cli.cli, ["duplicate", db_path, "one", "two"], catch_exceptions=False, ) assert result.exit_code == 0 assert db["one"].columns_dict == db["two"].columns_dict assert list(db["one"].rows) == list(db["two"].rows) @pytest.mark.skipif(not _has_compiled_ext(), reason="Requires compiled ext.c") @pytest.mark.parametrize( "entrypoint,should_pass,should_fail", ( (None, ("a",), ("b", "c")), ("sqlite3_ext_b_init", ("b"), ("a", "c")), ("sqlite3_ext_c_init", ("c"), ("a", "b")), ), ) def test_load_extension(entrypoint, should_pass, should_fail): ext = COMPILED_EXTENSION_PATH if entrypoint: ext += ":" + entrypoint for func in should_pass: result = CliRunner().invoke( cli.cli, ["memory", "select {}()".format(func), "--load-extension", ext], catch_exceptions=False, ) assert result.exit_code == 0 for func in should_fail: result = CliRunner().invoke( cli.cli, ["memory", "select {}()".format(func), "--load-extension", ext], catch_exceptions=False, ) assert result.exit_code == 1 sqlite-utils-3.35.2/tests/test_cli_bulk.py000066400000000000000000000046341452131415600206130ustar00rootroot00000000000000from click.testing import CliRunner from sqlite_utils import cli, Database import pathlib import pytest import subprocess import sys import time @pytest.fixture def test_db_and_path(tmpdir): db_path = str(pathlib.Path(tmpdir) / "data.db") db = Database(db_path) db["example"].insert_all( [ {"id": 1, "name": "One"}, {"id": 2, "name": "Two"}, ], pk="id", ) return db, db_path def test_cli_bulk(test_db_and_path): db, db_path = test_db_and_path result = CliRunner().invoke( cli.cli, [ "bulk", db_path, "insert into example (id, name) values (:id, myupper(:name))", "-", "--nl", "--functions", "myupper = lambda s: s.upper()", ], input='{"id": 3, "name": "Three"}\n{"id": 4, "name": "Four"}\n', ) assert result.exit_code == 0, result.output assert [ {"id": 1, "name": "One"}, {"id": 2, "name": "Two"}, {"id": 3, "name": "THREE"}, {"id": 4, "name": "FOUR"}, ] == list(db["example"].rows) def test_cli_bulk_batch_size(test_db_and_path): db, db_path = test_db_and_path proc = subprocess.Popen( [ sys.executable, "-m", "sqlite_utils", "bulk", db_path, "insert into example (id, name) values (:id, :name)", "-", "--nl", "--batch-size", "2", ], stdin=subprocess.PIPE, stdout=sys.stdout, ) # Writing one record should not commit proc.stdin.write(b'{"id": 3, "name": "Three"}\n\n') proc.stdin.flush() time.sleep(1) assert db["example"].count 
== 2 # Writing another should trigger a commit: proc.stdin.write(b'{"id": 4, "name": "Four"}\n\n') proc.stdin.flush() time.sleep(1) assert db["example"].count == 4 proc.stdin.close() proc.wait() assert proc.returncode == 0 def test_cli_bulk_error(test_db_and_path): _, db_path = test_db_and_path result = CliRunner().invoke( cli.cli, [ "bulk", db_path, "insert into example (id, name) value (:id, :name)", "-", "--nl", ], input='{"id": 3, "name": "Three"}', ) assert result.exit_code == 1 assert result.output == 'Error: near "value": syntax error\n' sqlite-utils-3.35.2/tests/test_cli_convert.py000066400000000000000000000424761452131415600213440ustar00rootroot00000000000000from click.testing import CliRunner from sqlite_utils import cli import sqlite_utils import json import textwrap import pathlib import pytest @pytest.fixture def test_db_and_path(fresh_db_and_path): db, db_path = fresh_db_and_path db["example"].insert_all( [ {"id": 1, "dt": "5th October 2019 12:04"}, {"id": 2, "dt": "6th October 2019 00:05:06"}, {"id": 3, "dt": ""}, {"id": 4, "dt": None}, ], pk="id", ) return db, db_path @pytest.fixture def fresh_db_and_path(tmpdir): db_path = str(pathlib.Path(tmpdir) / "data.db") db = sqlite_utils.Database(db_path) return db, db_path @pytest.mark.parametrize( "code", [ "return value.replace('October', 'Spooktober')", # Return is optional: "value.replace('October', 'Spooktober')", # Multiple lines are supported: "v = value.replace('October', 'Spooktober')\nreturn v", # Can also define a convert() function "def convert(value): return value.replace('October', 'Spooktober')", # ... with imports "import re\n\ndef convert(value): return value.replace('October', 'Spooktober')", ], ) def test_convert_code(fresh_db_and_path, code): db, db_path = fresh_db_and_path db["t"].insert({"text": "October"}) result = CliRunner().invoke( cli.cli, ["convert", db_path, "t", "text", code], catch_exceptions=False ) assert result.exit_code == 0, result.output value = list(db["t"].rows)[0]["text"] assert value == "Spooktober" @pytest.mark.parametrize( "bad_code", ( "def foo(value)", "$", ), ) def test_convert_code_errors(fresh_db_and_path, bad_code): db, db_path = fresh_db_and_path db["t"].insert({"text": "October"}) result = CliRunner().invoke( cli.cli, ["convert", db_path, "t", "text", bad_code], catch_exceptions=False ) assert result.exit_code == 1 assert result.output == "Error: Could not compile code\n" def test_convert_import(test_db_and_path): db, db_path = test_db_and_path result = CliRunner().invoke( cli.cli, [ "convert", db_path, "example", "dt", "return re.sub('O..', 'OXX', value)", "--import", "re", ], ) assert result.exit_code == 0, result.output assert [ {"id": 1, "dt": "5th OXXober 2019 12:04"}, {"id": 2, "dt": "6th OXXober 2019 00:05:06"}, {"id": 3, "dt": ""}, {"id": 4, "dt": None}, ] == list(db["example"].rows) def test_convert_import_nested(fresh_db_and_path): db, db_path = fresh_db_and_path db["example"].insert({"xml": ''}) result = CliRunner().invoke( cli.cli, [ "convert", db_path, "example", "xml", 'xml.etree.ElementTree.fromstring(value).attrib["name"]', "--import", "xml.etree.ElementTree", ], ) assert result.exit_code == 0, result.output assert [ {"xml": "Cleo"}, ] == list(db["example"].rows) def test_convert_dryrun(test_db_and_path): db, db_path = test_db_and_path result = CliRunner().invoke( cli.cli, [ "convert", db_path, "example", "dt", "return re.sub('O..', 'OXX', value)", "--import", "re", "--dry-run", ], ) assert result.exit_code == 0 assert result.output.strip() == ( "5th October 2019 
12:04\n" " --- becomes:\n" "5th OXXober 2019 12:04\n" "\n" "6th October 2019 00:05:06\n" " --- becomes:\n" "6th OXXober 2019 00:05:06\n" "\n" "\n" " --- becomes:\n" "\n" "\n" "None\n" " --- becomes:\n" "None\n\n" "Would affect 4 rows" ) # But it should not have actually modified the table data assert list(db["example"].rows) == [ {"id": 1, "dt": "5th October 2019 12:04"}, {"id": 2, "dt": "6th October 2019 00:05:06"}, {"id": 3, "dt": ""}, {"id": 4, "dt": None}, ] # Test with a where clause too result = CliRunner().invoke( cli.cli, [ "convert", db_path, "example", "dt", "return re.sub('O..', 'OXX', value)", "--import", "re", "--dry-run", "--where", "id = :id", "-p", "id", "4", ], ) assert result.exit_code == 0 assert result.output.strip().split("\n")[-1] == "Would affect 1 row" def test_convert_multi_dryrun(test_db_and_path): db_path = test_db_and_path[1] result = CliRunner().invoke( cli.cli, [ "convert", db_path, "example", "dt", "{'foo': 'bar', 'baz': 1}", "--dry-run", "--multi", ], ) assert result.exit_code == 0 assert result.output.strip() == ( "5th October 2019 12:04\n" " --- becomes:\n" '{"foo": "bar", "baz": 1}\n' "\n" "6th October 2019 00:05:06\n" " --- becomes:\n" '{"foo": "bar", "baz": 1}\n' "\n" "\n" " --- becomes:\n" "\n" "\n" "None\n" " --- becomes:\n" "None\n" "\n" "Would affect 4 rows" ) @pytest.mark.parametrize("drop", (True, False)) def test_convert_output_column(test_db_and_path, drop): db, db_path = test_db_and_path args = [ "convert", db_path, "example", "dt", "value.replace('October', 'Spooktober')", "--output", "newcol", ] if drop: args += ["--drop"] result = CliRunner().invoke(cli.cli, args) assert result.exit_code == 0, result.output expected = [ { "id": 1, "dt": "5th October 2019 12:04", "newcol": "5th Spooktober 2019 12:04", }, { "id": 2, "dt": "6th October 2019 00:05:06", "newcol": "6th Spooktober 2019 00:05:06", }, {"id": 3, "dt": "", "newcol": ""}, {"id": 4, "dt": None, "newcol": None}, ] if drop: for row in expected: del row["dt"] assert list(db["example"].rows) == expected @pytest.mark.parametrize( "output_type,expected", ( ("text", [(1, "1"), (2, "2"), (3, "3"), (4, "4")]), ("float", [(1, 1.0), (2, 2.0), (3, 3.0), (4, 4.0)]), ("integer", [(1, 1), (2, 2), (3, 3), (4, 4)]), (None, [(1, "1"), (2, "2"), (3, "3"), (4, "4")]), ), ) def test_convert_output_column_output_type(test_db_and_path, output_type, expected): db, db_path = test_db_and_path args = [ "convert", db_path, "example", "id", "value", "--output", "new_id", ] if output_type: args += ["--output-type", output_type] result = CliRunner().invoke( cli.cli, args, ) assert result.exit_code == 0, result.output assert expected == list(db.execute("select id, new_id from example")) @pytest.mark.parametrize( "options,expected_error", [ ( [ "dt", "id", "value.replace('October', 'Spooktober')", "--output", "newcol", ], "Cannot use --output with more than one column", ), ( [ "dt", "value.replace('October', 'Spooktober')", "--output", "newcol", "--output-type", "invalid", ], "Error: Invalid value for '--output-type'", ), ( [ "value.replace('October', 'Spooktober')", ], "Missing argument 'COLUMNS...'", ), ], ) def test_convert_output_error(test_db_and_path, options, expected_error): db_path = test_db_and_path[1] result = CliRunner().invoke( cli.cli, [ "convert", db_path, "example", ] + options, ) assert result.exit_code != 0 assert expected_error in result.output @pytest.mark.parametrize("drop", (True, False)) def test_convert_multi(fresh_db_and_path, drop): db, db_path = fresh_db_and_path db["creatures"].insert_all( [ 
{"id": 1, "name": "Simon"}, {"id": 2, "name": "Cleo"}, ], pk="id", ) args = [ "convert", db_path, "creatures", "name", "--multi", '{"upper": value.upper(), "lower": value.lower()}', ] if drop: args += ["--drop"] result = CliRunner().invoke(cli.cli, args) assert result.exit_code == 0, result.output expected = [ {"id": 1, "name": "Simon", "upper": "SIMON", "lower": "simon"}, {"id": 2, "name": "Cleo", "upper": "CLEO", "lower": "cleo"}, ] if drop: for row in expected: del row["name"] assert list(db["creatures"].rows) == expected def test_convert_multi_complex_column_types(fresh_db_and_path): db, db_path = fresh_db_and_path db["rows"].insert_all( [ {"id": 1}, {"id": 2}, {"id": 3}, {"id": 4}, ], pk="id", ) code = textwrap.dedent( """ if value == 1: return {"is_str": "", "is_float": 1.2, "is_int": None} elif value == 2: return {"is_float": 1, "is_int": 12} elif value == 3: return {"is_bytes": b"blah"} """ ) result = CliRunner().invoke( cli.cli, [ "convert", db_path, "rows", "id", "--multi", code, ], ) assert result.exit_code == 0, result.output assert list(db["rows"].rows) == [ {"id": 1, "is_str": "", "is_float": 1.2, "is_int": None, "is_bytes": None}, {"id": 2, "is_str": None, "is_float": 1.0, "is_int": 12, "is_bytes": None}, { "id": 3, "is_str": None, "is_float": None, "is_int": None, "is_bytes": b"blah", }, {"id": 4, "is_str": None, "is_float": None, "is_int": None, "is_bytes": None}, ] assert db["rows"].schema == ( "CREATE TABLE [rows] (\n" " [id] INTEGER PRIMARY KEY\n" ", [is_str] TEXT, [is_float] FLOAT, [is_int] INTEGER, [is_bytes] BLOB)" ) @pytest.mark.parametrize("delimiter", [None, ";", "-"]) def test_recipe_jsonsplit(tmpdir, delimiter): db_path = str(pathlib.Path(tmpdir) / "data.db") db = sqlite_utils.Database(db_path) db["example"].insert_all( [ {"id": 1, "tags": (delimiter or ",").join(["foo", "bar"])}, {"id": 2, "tags": (delimiter or ",").join(["bar", "baz"])}, ], pk="id", ) code = "r.jsonsplit(value)" if delimiter: code = 'recipes.jsonsplit(value, delimiter="{}")'.format(delimiter) args = ["convert", db_path, "example", "tags", code] result = CliRunner().invoke(cli.cli, args) assert result.exit_code == 0, result.output assert list(db["example"].rows) == [ {"id": 1, "tags": '["foo", "bar"]'}, {"id": 2, "tags": '["bar", "baz"]'}, ] @pytest.mark.parametrize( "type,expected_array", ( (None, ["1", "2", "3"]), ("float", [1.0, 2.0, 3.0]), ("int", [1, 2, 3]), ), ) def test_recipe_jsonsplit_type(fresh_db_and_path, type, expected_array): db, db_path = fresh_db_and_path db["example"].insert_all( [ {"id": 1, "records": "1,2,3"}, ], pk="id", ) code = "r.jsonsplit(value)" if type: code = "recipes.jsonsplit(value, type={})".format(type) args = ["convert", db_path, "example", "records", code] result = CliRunner().invoke(cli.cli, args) assert result.exit_code == 0, result.output assert json.loads(db["example"].get(1)["records"]) == expected_array @pytest.mark.parametrize("drop", (True, False)) def test_recipe_jsonsplit_output(fresh_db_and_path, drop): db, db_path = fresh_db_and_path db["example"].insert_all( [ {"id": 1, "records": "1,2,3"}, ], pk="id", ) code = "r.jsonsplit(value)" args = ["convert", db_path, "example", "records", code, "--output", "tags"] if drop: args += ["--drop"] result = CliRunner().invoke(cli.cli, args) assert result.exit_code == 0, result.output expected = { "id": 1, "records": "1,2,3", "tags": '["1", "2", "3"]', } if drop: del expected["records"] assert db["example"].get(1) == expected def test_cannot_use_drop_without_multi_or_output(fresh_db_and_path): args = ["convert", 
fresh_db_and_path[1], "example", "records", "value", "--drop"] result = CliRunner().invoke(cli.cli, args) assert result.exit_code == 1, result.output assert "Error: --drop can only be used with --output or --multi" in result.output def test_cannot_use_multi_with_more_than_one_column(fresh_db_and_path): args = [ "convert", fresh_db_and_path[1], "example", "records", "othercol", "value", "--multi", ] result = CliRunner().invoke(cli.cli, args) assert result.exit_code == 1, result.output assert "Error: Cannot use --multi with more than one column" in result.output def test_multi_with_bad_function(test_db_and_path): args = [ "convert", test_db_and_path[1], "example", "dt", "value.upper()", "--multi", ] result = CliRunner().invoke(cli.cli, args) assert result.exit_code == 1, result.output assert "When using --multi code must return a Python dictionary" in result.output def test_convert_where(test_db_and_path): db, db_path = test_db_and_path result = CliRunner().invoke( cli.cli, [ "convert", db_path, "example", "dt", "str(value).upper()", "--where", "id = :id", "-p", "id", 2, ], ) assert result.exit_code == 0, result.output assert list(db["example"].rows) == [ {"id": 1, "dt": "5th October 2019 12:04"}, {"id": 2, "dt": "6TH OCTOBER 2019 00:05:06"}, {"id": 3, "dt": ""}, {"id": 4, "dt": None}, ] def test_convert_where_multi(fresh_db_and_path): db, db_path = fresh_db_and_path db["names"].insert_all( [{"id": 1, "name": "Cleo"}, {"id": 2, "name": "Bants"}], pk="id" ) result = CliRunner().invoke( cli.cli, [ "convert", db_path, "names", "name", '{"upper": value.upper()}', "--where", "id = :id", "-p", "id", 2, "--multi", ], ) assert result.exit_code == 0, result.output assert list(db["names"].rows) == [ {"id": 1, "name": "Cleo", "upper": None}, {"id": 2, "name": "Bants", "upper": "BANTS"}, ] def test_convert_code_standard_input(fresh_db_and_path): db, db_path = fresh_db_and_path db["names"].insert_all([{"id": 1, "name": "Cleo"}], pk="id") result = CliRunner().invoke( cli.cli, [ "convert", db_path, "names", "name", "-", ], input="value.upper()", ) assert result.exit_code == 0, result.output assert list(db["names"].rows) == [ {"id": 1, "name": "CLEO"}, ] def test_convert_hyphen_workaround(fresh_db_and_path): db, db_path = fresh_db_and_path db["names"].insert_all([{"id": 1, "name": "Cleo"}], pk="id") result = CliRunner().invoke( cli.cli, ["convert", db_path, "names", "name", '"-"'], ) assert result.exit_code == 0, result.output assert list(db["names"].rows) == [ {"id": 1, "name": "-"}, ] def test_convert_initialization_pattern(fresh_db_and_path): db, db_path = fresh_db_and_path db["names"].insert_all([{"id": 1, "name": "Cleo"}], pk="id") result = CliRunner().invoke( cli.cli, [ "convert", db_path, "names", "name", "-", ], input="import random\nrandom.seed(1)\ndef convert(value): return random.randint(0, 100)", ) assert result.exit_code == 0, result.output assert list(db["names"].rows) == [ {"id": 1, "name": "17"}, ] @pytest.mark.parametrize( "no_skip_false,expected", ( (True, 1), (False, 0), ), ) def test_convert_no_skip_false(fresh_db_and_path, no_skip_false, expected): db, db_path = fresh_db_and_path args = [ "convert", db_path, "t", "x", "-", ] if no_skip_false: args.append("--no-skip-false") db["t"].insert_all([{"x": 0}, {"x": 1}]) assert db["t"].get(1)["x"] == 0 assert db["t"].get(2)["x"] == 1 result = CliRunner().invoke(cli.cli, args, input="value + 1") assert result.exit_code == 0, result.output assert db["t"].get(1)["x"] == expected assert db["t"].get(2)["x"] == 2 
sqlite-utils-3.35.2/tests/test_cli_insert.py000066400000000000000000000442411452131415600211600ustar00rootroot00000000000000from sqlite_utils import cli, Database from click.testing import CliRunner import json import pytest import subprocess import sys import time def test_insert_simple(tmpdir): json_path = str(tmpdir / "dog.json") db_path = str(tmpdir / "dogs.db") with open(json_path, "w") as fp: fp.write(json.dumps({"name": "Cleo", "age": 4})) result = CliRunner().invoke(cli.cli, ["insert", db_path, "dogs", json_path]) assert result.exit_code == 0 assert [{"age": 4, "name": "Cleo"}] == list( Database(db_path).query("select * from dogs") ) db = Database(db_path) assert ["dogs"] == db.table_names() assert [] == db["dogs"].indexes def test_insert_from_stdin(tmpdir): db_path = str(tmpdir / "dogs.db") result = CliRunner().invoke( cli.cli, ["insert", db_path, "dogs", "-"], input=json.dumps({"name": "Cleo", "age": 4}), ) assert result.exit_code == 0 assert [{"age": 4, "name": "Cleo"}] == list( Database(db_path).query("select * from dogs") ) def test_insert_invalid_json_error(tmpdir): db_path = str(tmpdir / "dogs.db") result = CliRunner().invoke( cli.cli, ["insert", db_path, "dogs", "-"], input="name,age\nCleo,4", ) assert result.exit_code == 1 assert result.output == ( "Error: Invalid JSON - use --csv for CSV or --tsv for TSV files\n\n" "JSON error: Expecting value: line 1 column 1 (char 0)\n" ) def test_insert_json_flatten(tmpdir): db_path = str(tmpdir / "flat.db") result = CliRunner().invoke( cli.cli, ["insert", db_path, "items", "-", "--flatten"], input=json.dumps({"nested": {"data": 4}}), ) assert result.exit_code == 0 assert list(Database(db_path).query("select * from items")) == [{"nested_data": 4}] def test_insert_json_flatten_nl(tmpdir): db_path = str(tmpdir / "flat.db") result = CliRunner().invoke( cli.cli, ["insert", db_path, "items", "-", "--flatten", "--nl"], input="\n".join( json.dumps(item) for item in [{"nested": {"data": 4}}, {"nested": {"other": 3}}] ), ) assert result.exit_code == 0 assert list(Database(db_path).query("select * from items")) == [ {"nested_data": 4, "nested_other": None}, {"nested_data": None, "nested_other": 3}, ] def test_insert_with_primary_key(db_path, tmpdir): json_path = str(tmpdir / "dog.json") with open(json_path, "w") as fp: fp.write(json.dumps({"id": 1, "name": "Cleo", "age": 4})) result = CliRunner().invoke( cli.cli, ["insert", db_path, "dogs", json_path, "--pk", "id"] ) assert result.exit_code == 0 assert [{"id": 1, "age": 4, "name": "Cleo"}] == list( Database(db_path).query("select * from dogs") ) db = Database(db_path) assert ["id"] == db["dogs"].pks def test_insert_multiple_with_primary_key(db_path, tmpdir): json_path = str(tmpdir / "dogs.json") dogs = [{"id": i, "name": "Cleo {}".format(i), "age": i + 3} for i in range(1, 21)] with open(json_path, "w") as fp: fp.write(json.dumps(dogs)) result = CliRunner().invoke( cli.cli, ["insert", db_path, "dogs", json_path, "--pk", "id"] ) assert result.exit_code == 0 db = Database(db_path) assert dogs == list(db.query("select * from dogs order by id")) assert ["id"] == db["dogs"].pks def test_insert_multiple_with_compound_primary_key(db_path, tmpdir): json_path = str(tmpdir / "dogs.json") dogs = [ {"breed": "mixed", "id": i, "name": "Cleo {}".format(i), "age": i + 3} for i in range(1, 21) ] with open(json_path, "w") as fp: fp.write(json.dumps(dogs)) result = CliRunner().invoke( cli.cli, ["insert", db_path, "dogs", json_path, "--pk", "id", "--pk", "breed"] ) assert result.exit_code == 0 db = 
Database(db_path) assert dogs == list(db.query("select * from dogs order by breed, id")) assert {"breed", "id"} == set(db["dogs"].pks) assert ( "CREATE TABLE [dogs] (\n" " [breed] TEXT,\n" " [id] INTEGER,\n" " [name] TEXT,\n" " [age] INTEGER,\n" " PRIMARY KEY ([id], [breed])\n" ")" ) == db["dogs"].schema def test_insert_not_null_default(db_path, tmpdir): json_path = str(tmpdir / "dogs.json") dogs = [ {"id": i, "name": "Cleo {}".format(i), "age": i + 3, "score": 10} for i in range(1, 21) ] with open(json_path, "w") as fp: fp.write(json.dumps(dogs)) result = CliRunner().invoke( cli.cli, ["insert", db_path, "dogs", json_path, "--pk", "id"] + ["--not-null", "name", "--not-null", "age"] + ["--default", "score", "5", "--default", "age", "1"], ) assert result.exit_code == 0 db = Database(db_path) assert ( "CREATE TABLE [dogs] (\n" " [id] INTEGER PRIMARY KEY,\n" " [name] TEXT NOT NULL,\n" " [age] INTEGER NOT NULL DEFAULT '1',\n" " [score] INTEGER DEFAULT '5'\n)" ) == db["dogs"].schema def test_insert_binary_base64(db_path): result = CliRunner().invoke( cli.cli, ["insert", db_path, "files", "-"], input=r'{"content": {"$base64": true, "encoded": "aGVsbG8="}}', ) assert result.exit_code == 0, result.output db = Database(db_path) actual = list(db.query("select content from files")) assert actual == [{"content": b"hello"}] def test_insert_newline_delimited(db_path): result = CliRunner().invoke( cli.cli, ["insert", db_path, "from_json_nl", "-", "--nl"], input='{"foo": "bar", "n": 1}\n\n{"foo": "baz", "n": 2}', ) assert result.exit_code == 0, result.output db = Database(db_path) assert [ {"foo": "bar", "n": 1}, {"foo": "baz", "n": 2}, ] == list(db.query("select foo, n from from_json_nl")) def test_insert_ignore(db_path, tmpdir): db = Database(db_path) db["dogs"].insert({"id": 1, "name": "Cleo"}, pk="id") json_path = str(tmpdir / "dogs.json") with open(json_path, "w") as fp: fp.write(json.dumps([{"id": 1, "name": "Bailey"}])) # Should raise error without --ignore result = CliRunner().invoke( cli.cli, ["insert", db_path, "dogs", json_path, "--pk", "id"] ) assert result.exit_code != 0, result.output # If we use --ignore it should run OK result = CliRunner().invoke( cli.cli, ["insert", db_path, "dogs", json_path, "--pk", "id", "--ignore"] ) assert result.exit_code == 0, result.output # ... 
but it should actually have no effect assert [{"id": 1, "name": "Cleo"}] == list(db.query("select * from dogs")) @pytest.mark.parametrize( "content,options", [ ("foo\tbar\tbaz\n1\t2\tcat,dog", ["--tsv"]), ('foo,bar,baz\n1,2,"cat,dog"', ["--csv"]), ('foo;bar;baz\n1;2;"cat,dog"', ["--csv", "--delimiter", ";"]), # --delimiter implies --csv: ('foo;bar;baz\n1;2;"cat,dog"', ["--delimiter", ";"]), ("foo,bar,baz\n1,2,|cat,dog|", ["--csv", "--quotechar", "|"]), ("foo,bar,baz\n1,2,|cat,dog|", ["--quotechar", "|"]), ], ) def test_insert_csv_tsv(content, options, db_path, tmpdir): db = Database(db_path) file_path = str(tmpdir / "insert.csv-tsv") with open(file_path, "w") as fp: fp.write(content) result = CliRunner().invoke( cli.cli, ["insert", db_path, "data", file_path] + options, catch_exceptions=False, ) assert result.exit_code == 0 assert [{"foo": "1", "bar": "2", "baz": "cat,dog"}] == list(db["data"].rows) @pytest.mark.parametrize("empty_null", (True, False)) def test_insert_csv_empty_null(db_path, empty_null): options = ["--csv"] if empty_null: options.append("--empty-null") result = CliRunner().invoke( cli.cli, ["insert", db_path, "data", "-"] + options, catch_exceptions=False, input="foo,bar,baz\n1,,cat,dog", ) assert result.exit_code == 0 db = Database(db_path) assert [r for r in db["data"].rows] == [ {"foo": "1", "bar": None if empty_null else "", "baz": "cat"} ] @pytest.mark.parametrize( "input,args", ( ( json.dumps( [{"name": "One"}, {"name": "Two"}, {"name": "Three"}, {"name": "Four"}] ), [], ), ("name\nOne\nTwo\nThree\nFour\n", ["--csv"]), ), ) def test_insert_stop_after(tmpdir, input, args): db_path = str(tmpdir / "data.db") result = CliRunner().invoke( cli.cli, ["insert", db_path, "rows", "-", "--stop-after", "2"] + args, input=input, ) assert result.exit_code == 0 assert [{"name": "One"}, {"name": "Two"}] == list( Database(db_path).query("select * from rows") ) @pytest.mark.parametrize( "options", ( ["--tsv", "--nl"], ["--tsv", "--csv"], ["--csv", "--nl"], ["--csv", "--nl", "--tsv"], ), ) def test_only_allow_one_of_nl_tsv_csv(options, db_path, tmpdir): file_path = str(tmpdir / "insert.csv-tsv") with open(file_path, "w") as fp: fp.write("foo") result = CliRunner().invoke( cli.cli, ["insert", db_path, "data", file_path] + options ) assert result.exit_code != 0 assert "Error: Use just one of --nl, --csv or --tsv" == result.output.strip() def test_insert_replace(db_path, tmpdir): test_insert_multiple_with_primary_key(db_path, tmpdir) json_path = str(tmpdir / "insert-replace.json") db = Database(db_path) assert db["dogs"].count == 20 insert_replace_dogs = [ {"id": 1, "name": "Insert replaced 1", "age": 4}, {"id": 2, "name": "Insert replaced 2", "age": 4}, {"id": 21, "name": "Fresh insert 21", "age": 6}, ] with open(json_path, "w") as fp: fp.write(json.dumps(insert_replace_dogs)) result = CliRunner().invoke( cli.cli, ["insert", db_path, "dogs", json_path, "--pk", "id", "--replace"] ) assert result.exit_code == 0, result.output assert db["dogs"].count == 21 assert ( list(db.query("select * from dogs where id in (1, 2, 21) order by id")) == insert_replace_dogs ) def test_insert_truncate(db_path): result = CliRunner().invoke( cli.cli, ["insert", db_path, "from_json_nl", "-", "--nl", "--batch-size=1"], input='{"foo": "bar", "n": 1}\n{"foo": "baz", "n": 2}', ) assert result.exit_code == 0, result.output db = Database(db_path) assert [ {"foo": "bar", "n": 1}, {"foo": "baz", "n": 2}, ] == list(db.query("select foo, n from from_json_nl")) # Truncate and insert new rows result = CliRunner().invoke( 
cli.cli, [ "insert", db_path, "from_json_nl", "-", "--nl", "--truncate", "--batch-size=1", ], input='{"foo": "bam", "n": 3}\n{"foo": "bat", "n": 4}', ) assert result.exit_code == 0, result.output assert [ {"foo": "bam", "n": 3}, {"foo": "bat", "n": 4}, ] == list(db.query("select foo, n from from_json_nl")) def test_insert_alter(db_path, tmpdir): result = CliRunner().invoke( cli.cli, ["insert", db_path, "from_json_nl", "-", "--nl"], input='{"foo": "bar", "n": 1}\n{"foo": "baz", "n": 2}', ) assert result.exit_code == 0, result.output # Should get an error with incorrect shaped additional data result = CliRunner().invoke( cli.cli, ["insert", db_path, "from_json_nl", "-", "--nl"], input='{"foo": "bar", "baz": 5}', ) assert result.exit_code != 0, result.output # If we run it again with --alter it should work correctly result = CliRunner().invoke( cli.cli, ["insert", db_path, "from_json_nl", "-", "--nl", "--alter"], input='{"foo": "bar", "baz": 5}', ) assert result.exit_code == 0, result.output # Soundness check the database itself db = Database(db_path) assert {"foo": str, "n": int, "baz": int} == db["from_json_nl"].columns_dict assert [ {"foo": "bar", "n": 1, "baz": None}, {"foo": "baz", "n": 2, "baz": None}, {"foo": "bar", "baz": 5, "n": None}, ] == list(db.query("select foo, n, baz from from_json_nl")) def test_insert_analyze(db_path): db = Database(db_path) db["rows"].insert({"foo": "x", "n": 3}) db["rows"].create_index(["n"]) assert "sqlite_stat1" not in db.table_names() result = CliRunner().invoke( cli.cli, ["insert", db_path, "rows", "-", "--nl", "--analyze"], input='{"foo": "bar", "n": 1}\n{"foo": "baz", "n": 2}', ) assert result.exit_code == 0, result.output assert "sqlite_stat1" in db.table_names() def test_insert_lines(db_path): result = CliRunner().invoke( cli.cli, ["insert", db_path, "from_lines", "-", "--lines"], input='First line\nSecond line\n{"foo": "baz"}', ) assert result.exit_code == 0, result.output db = Database(db_path) assert [ {"line": "First line"}, {"line": "Second line"}, {"line": '{"foo": "baz"}'}, ] == list(db.query("select line from from_lines")) def test_insert_text(db_path): result = CliRunner().invoke( cli.cli, ["insert", db_path, "from_text", "-", "--text"], input='First line\nSecond line\n{"foo": "baz"}', ) assert result.exit_code == 0, result.output db = Database(db_path) assert [{"text": 'First line\nSecond line\n{"foo": "baz"}'}] == list( db.query("select text from from_text") ) @pytest.mark.parametrize( "options,input", ( ([], '[{"id": "1", "name": "Bob"}, {"id": "2", "name": "Cat"}]'), (["--csv"], "id,name\n1,Bob\n2,Cat"), (["--nl"], '{"id": "1", "name": "Bob"}\n{"id": "2", "name": "Cat"}'), ), ) def test_insert_convert_json_csv_jsonnl(db_path, options, input): result = CliRunner().invoke( cli.cli, ["insert", db_path, "rows", "-", "--convert", '{**row, **{"extra": 1}}'] + options, input=input, ) assert result.exit_code == 0, result.output db = Database(db_path) rows = list(db.query("select id, name, extra from rows")) assert rows == [ {"id": "1", "name": "Bob", "extra": 1}, {"id": "2", "name": "Cat", "extra": 1}, ] def test_insert_convert_text(db_path): result = CliRunner().invoke( cli.cli, [ "insert", db_path, "text", "-", "--text", "--convert", '{"text": text.upper()}', ], input="This is text\nwill be upper now", ) assert result.exit_code == 0, result.output db = Database(db_path) rows = list(db.query("select [text] from [text]")) assert rows == [{"text": "THIS IS TEXT\nWILL BE UPPER NOW"}] def test_insert_convert_text_returning_iterator(db_path): 
result = CliRunner().invoke( cli.cli, [ "insert", db_path, "text", "-", "--text", "--convert", '({"word": w} for w in text.split())', ], input="A bunch of words", ) assert result.exit_code == 0, result.output db = Database(db_path) rows = list(db.query("select [word] from [text]")) assert rows == [{"word": "A"}, {"word": "bunch"}, {"word": "of"}, {"word": "words"}] def test_insert_convert_lines(db_path): result = CliRunner().invoke( cli.cli, [ "insert", db_path, "all", "-", "--lines", "--convert", '{"line": line.upper()}', ], input="This is text\nwill be upper now", ) assert result.exit_code == 0, result.output db = Database(db_path) rows = list(db.query("select [line] from [all]")) assert rows == [{"line": "THIS IS TEXT"}, {"line": "WILL BE UPPER NOW"}] def test_insert_convert_row_modifying_in_place(db_path): result = CliRunner().invoke( cli.cli, [ "insert", db_path, "rows", "-", "--convert", 'row["is_chicken"] = True', ], input='{"name": "Azi"}', ) assert result.exit_code == 0, result.output db = Database(db_path) rows = list(db.query("select name, is_chicken from rows")) assert rows == [{"name": "Azi", "is_chicken": 1}] @pytest.mark.parametrize( "options,expected_error", ( ( ["--text", "--convert", "1"], "Error: --convert must return dict or iterator\n", ), (["--convert", "1"], "Error: Rows must all be dictionaries, got: 1\n"), ), ) def test_insert_convert_error_messages(db_path, options, expected_error): result = CliRunner().invoke( cli.cli, [ "insert", db_path, "rows", "-", ] + options, input='{"name": "Azi"}', ) assert result.exit_code == 1 assert result.output == expected_error def test_insert_streaming_batch_size_1(db_path): # https://github.com/simonw/sqlite-utils/issues/364 # Streaming with --batch-size 1 should commit on each record # Can't use CliRunner().invoke() here bacuse we need to # run assertions in between writing to process stdin proc = subprocess.Popen( [ sys.executable, "-m", "sqlite_utils", "insert", db_path, "rows", "-", "--nl", "--batch-size", "1", ], stdin=subprocess.PIPE, stdout=sys.stdout, ) proc.stdin.write(b'{"name": "Azi"}\n') proc.stdin.flush() def try_until(expected): tries = 0 while True: rows = list(Database(db_path)["rows"].rows) if rows == expected: return tries += 1 if tries > 10: assert False, "Expected {}, got {}".format(expected, rows) time.sleep(tries * 0.1) try_until([{"name": "Azi"}]) proc.stdin.write(b'{"name": "Suna"}\n') proc.stdin.flush() try_until([{"name": "Azi"}, {"name": "Suna"}]) proc.stdin.close() proc.wait() assert proc.returncode == 0 sqlite-utils-3.35.2/tests/test_cli_memory.py000066400000000000000000000220031452131415600211540ustar00rootroot00000000000000import json import pytest from click.testing import CliRunner from sqlite_utils import Database, cli def test_memory_basic(): result = CliRunner().invoke(cli.cli, ["memory", "select 1 + 1"]) assert result.exit_code == 0 assert result.output.strip() == '[{"1 + 1": 2}]' @pytest.mark.parametrize("sql_from", ("test", "t", "t1")) @pytest.mark.parametrize("use_stdin", (True, False)) def test_memory_csv(tmpdir, sql_from, use_stdin): content = "id,name\n1,Cleo\n2,Bants" input = None if use_stdin: input = content csv_path = "-" if sql_from == "test": sql_from = "stdin" else: csv_path = str(tmpdir / "test.csv") with open(csv_path, "w") as fp: fp.write(content) result = CliRunner().invoke( cli.cli, ["memory", csv_path, "select * from {}".format(sql_from), "--nl"], input=input, ) assert result.exit_code == 0 assert ( result.output.strip() == '{"id": 1, "name": "Cleo"}\n{"id": 2, "name": 
"Bants"}' ) @pytest.mark.parametrize("use_stdin", (True, False)) def test_memory_tsv(tmpdir, use_stdin): data = "id\tname\n1\tCleo\n2\tBants" if use_stdin: input = data path = "stdin:tsv" sql_from = "stdin" else: input = None path = str(tmpdir / "chickens.tsv") with open(path, "w") as fp: fp.write(data) path = path + ":tsv" sql_from = "chickens" result = CliRunner().invoke( cli.cli, ["memory", path, "select * from {}".format(sql_from)], input=input, ) assert result.exit_code == 0, result.output assert json.loads(result.output.strip()) == [ {"id": 1, "name": "Cleo"}, {"id": 2, "name": "Bants"}, ] @pytest.mark.parametrize("use_stdin", (True, False)) def test_memory_json(tmpdir, use_stdin): data = '[{"name": "Bants"}, {"name": "Dori", "age": 1, "nested": {"nest": 1}}]' if use_stdin: input = data path = "stdin:json" sql_from = "stdin" else: input = None path = str(tmpdir / "chickens.json") with open(path, "w") as fp: fp.write(data) path = path + ":json" sql_from = "chickens" result = CliRunner().invoke( cli.cli, ["memory", path, "select * from {}".format(sql_from)], input=input, ) assert result.exit_code == 0, result.output assert json.loads(result.output.strip()) == [ {"name": "Bants", "age": None, "nested": None}, {"name": "Dori", "age": 1, "nested": '{"nest": 1}'}, ] @pytest.mark.parametrize("use_stdin", (True, False)) def test_memory_json_nl(tmpdir, use_stdin): data = '{"name": "Bants"}\n\n{"name": "Dori"}' if use_stdin: input = data path = "stdin:nl" sql_from = "stdin" else: input = None path = str(tmpdir / "chickens.json") with open(path, "w") as fp: fp.write(data) path = path + ":nl" sql_from = "chickens" result = CliRunner().invoke( cli.cli, ["memory", path, "select * from {}".format(sql_from)], input=input, ) assert result.exit_code == 0, result.output assert json.loads(result.output.strip()) == [ {"name": "Bants"}, {"name": "Dori"}, ] @pytest.mark.parametrize("use_stdin", (True, False)) def test_memory_csv_encoding(tmpdir, use_stdin): latin1_csv = ( b"date,name,latitude,longitude\n" b"2020-03-04,S\xe3o Paulo,-23.561,-46.645\n" ) input = None if use_stdin: input = latin1_csv csv_path = "-" sql_from = "stdin" else: csv_path = str(tmpdir / "test.csv") with open(csv_path, "wb") as fp: fp.write(latin1_csv) sql_from = "test" # Without --encoding should error: assert ( CliRunner() .invoke( cli.cli, ["memory", csv_path, "select * from {}".format(sql_from), "--nl"], input=input, ) .exit_code == 1 ) # With --encoding should work: result = CliRunner().invoke( cli.cli, ["memory", "-", "select * from stdin", "--encoding", "latin-1", "--nl"], input=latin1_csv, ) assert result.exit_code == 0, result.output assert json.loads(result.output.strip()) == { "date": "2020-03-04", "name": "São Paulo", "latitude": -23.561, "longitude": -46.645, } @pytest.mark.parametrize("extra_args", ([], ["select 1"])) def test_memory_dump(extra_args): result = CliRunner().invoke( cli.cli, ["memory", "-"] + extra_args + ["--dump"], input="id,name\n1,Cleo\n2,Bants", ) assert result.exit_code == 0 expected = ( "BEGIN TRANSACTION;\n" 'CREATE TABLE IF NOT EXISTS "stdin" (\n' " [id] INTEGER,\n" " [name] TEXT\n" ");\n" "INSERT INTO \"stdin\" VALUES(1,'Cleo');\n" "INSERT INTO \"stdin\" VALUES(2,'Bants');\n" "CREATE VIEW t1 AS select * from [stdin];\n" "CREATE VIEW t AS select * from [stdin];\n" "COMMIT;" ) # Using sqlite-dump it won't have IF NOT EXISTS expected_alternative = expected.replace("IF NOT EXISTS ", "") assert result.output.strip() in (expected, expected_alternative) @pytest.mark.parametrize("extra_args", ([], 
["select 1"])) def test_memory_schema(extra_args): result = CliRunner().invoke( cli.cli, ["memory", "-"] + extra_args + ["--schema"], input="id,name\n1,Cleo\n2,Bants", ) assert result.exit_code == 0 assert result.output.strip() == ( 'CREATE TABLE "stdin" (\n' " [id] INTEGER,\n" " [name] TEXT\n" ");\n" "CREATE VIEW t1 AS select * from [stdin];\n" "CREATE VIEW t AS select * from [stdin];" ) @pytest.mark.parametrize("extra_args", ([], ["select 1"])) def test_memory_save(tmpdir, extra_args): save_to = str(tmpdir / "save.db") result = CliRunner().invoke( cli.cli, ["memory", "-"] + extra_args + ["--save", save_to], input="id,name\n1,Cleo\n2,Bants", ) assert result.exit_code == 0 db = Database(save_to) assert list(db["stdin"].rows) == [ {"id": 1, "name": "Cleo"}, {"id": 2, "name": "Bants"}, ] @pytest.mark.parametrize("option", ("-n", "--no-detect-types")) def test_memory_no_detect_types(option): result = CliRunner().invoke( cli.cli, ["memory", "-", "select * from stdin"] + [option], input="id,name,weight\n1,Cleo,45.5\n2,Bants,3.5", ) assert result.exit_code == 0, result.output assert json.loads(result.output.strip()) == [ {"id": "1", "name": "Cleo", "weight": "45.5"}, {"id": "2", "name": "Bants", "weight": "3.5"}, ] def test_memory_flatten(): result = CliRunner().invoke( cli.cli, ["memory", "-", "select * from stdin", "--flatten"], input=json.dumps( { "httpRequest": { "latency": "0.112114537s", "requestMethod": "GET", }, "insertId": "6111722f000b5b4c4d4071e2", } ), ) assert result.exit_code == 0, result.output assert json.loads(result.output.strip()) == [ { "httpRequest_latency": "0.112114537s", "httpRequest_requestMethod": "GET", "insertId": "6111722f000b5b4c4d4071e2", } ] def test_memory_analyze(): result = CliRunner().invoke( cli.cli, ["memory", "-", "--analyze"], input="id,name\n1,Cleo\n2,Bants", ) assert result.exit_code == 0 assert result.output == ( "stdin.id: (1/2)\n\n" " Total rows: 2\n" " Null rows: 0\n" " Blank rows: 0\n\n" " Distinct values: 2\n\n" "stdin.name: (2/2)\n\n" " Total rows: 2\n" " Null rows: 0\n" " Blank rows: 0\n\n" " Distinct values: 2\n\n" ) def test_memory_two_files_with_same_stem(tmpdir): (tmpdir / "one").mkdir() (tmpdir / "two").mkdir() one = tmpdir / "one" / "data.csv" two = tmpdir / "two" / "data.csv" one.write_text("id,name\n1,Cleo\n2,Bants", encoding="utf-8") two.write_text("id,name\n3,Blue\n4,Lila", encoding="utf-8") result = CliRunner().invoke(cli.cli, ["memory", str(one), str(two), "", "--schema"]) assert result.exit_code == 0 assert result.output == ( 'CREATE TABLE "data" (\n' " [id] INTEGER,\n" " [name] TEXT\n" ");\n" "CREATE VIEW t1 AS select * from [data];\n" "CREATE VIEW t AS select * from [data];\n" 'CREATE TABLE "data_2" (\n' " [id] INTEGER,\n" " [name] TEXT\n" ");\n" "CREATE VIEW t2 AS select * from [data_2];\n" ) def test_memory_functions(): result = CliRunner().invoke( cli.cli, ["memory", "select hello()", "--functions", "hello = lambda: 'Hello'"], ) assert result.exit_code == 0 assert result.output.strip() == '[{"hello()": "Hello"}]' sqlite-utils-3.35.2/tests/test_column_affinity.py000066400000000000000000000024051452131415600222070ustar00rootroot00000000000000import pytest from sqlite_utils.utils import column_affinity EXAMPLES = [ # Examples from https://www.sqlite.org/datatype3.html#affinity_name_examples ("INT", int), ("INTEGER", int), ("TINYINT", int), ("SMALLINT", int), ("MEDIUMINT", int), ("BIGINT", int), ("UNSIGNED BIG INT", int), ("INT2", int), ("INT8", int), ("CHARACTER(20)", str), ("VARCHAR(255)", str), ("VARYING CHARACTER(255)", str), 
("NCHAR(55)", str), ("NATIVE CHARACTER(70)", str), ("NVARCHAR(100)", str), ("TEXT", str), ("CLOB", str), ("BLOB", bytes), ("REAL", float), ("DOUBLE", float), ("DOUBLE PRECISION", float), ("FLOAT", float), # Numeric, treated as float: ("NUMERIC", float), ("DECIMAL(10,5)", float), ("BOOLEAN", float), ("DATE", float), ("DATETIME", float), ] @pytest.mark.parametrize("column_def,expected_type", EXAMPLES) def test_column_affinity(column_def, expected_type): assert expected_type is column_affinity(column_def) @pytest.mark.parametrize("column_def,expected_type", EXAMPLES) def test_columns_dict(fresh_db, column_def, expected_type): fresh_db.execute("create table foo (col {})".format(column_def)) assert {"col": expected_type} == fresh_db["foo"].columns_dict sqlite-utils-3.35.2/tests/test_constructor.py000066400000000000000000000022731452131415600214110ustar00rootroot00000000000000from sqlite_utils import Database from sqlite_utils.utils import sqlite3 import pytest def test_recursive_triggers(): db = Database(memory=True) assert db.execute("PRAGMA recursive_triggers").fetchone()[0] def test_recursive_triggers_off(): db = Database(memory=True, recursive_triggers=False) assert not db.execute("PRAGMA recursive_triggers").fetchone()[0] def test_memory_name(): db1 = Database(memory_name="shared") db2 = Database(memory_name="shared") db1["dogs"].insert({"name": "Cleo"}) assert list(db2["dogs"].rows) == [{"name": "Cleo"}] def test_sqlite_version(): db = Database(memory=True) version = db.sqlite_version assert isinstance(version, tuple) as_string = ".".join(map(str, version)) actual = next(db.query("select sqlite_version() as v"))["v"] assert actual == as_string @pytest.mark.parametrize("memory", [True, False]) def test_database_close(tmpdir, memory): if memory: db = Database(memory=True) else: db = Database(str(tmpdir / "test.db")) assert db.execute("select 1 + 1").fetchone()[0] == 2 db.close() with pytest.raises(sqlite3.ProgrammingError): db.execute("select 1 + 1") sqlite-utils-3.35.2/tests/test_conversions.py000066400000000000000000000026531452131415600213760ustar00rootroot00000000000000def test_insert_conversion(fresh_db): table = fresh_db["table"] table.insert({"foo": "bar"}, conversions={"foo": "upper(?)"}) assert [{"foo": "BAR"}] == list(table.rows) def test_insert_all_conversion(fresh_db): table = fresh_db["table"] table.insert_all([{"foo": "bar"}], conversions={"foo": "upper(?)"}) assert [{"foo": "BAR"}] == list(table.rows) def test_upsert_conversion(fresh_db): table = fresh_db["table"] table.upsert({"id": 1, "foo": "bar"}, pk="id", conversions={"foo": "upper(?)"}) assert [{"id": 1, "foo": "BAR"}] == list(table.rows) table.upsert( {"id": 1, "bar": "baz"}, pk="id", conversions={"bar": "upper(?)"}, alter=True ) assert [{"id": 1, "foo": "BAR", "bar": "BAZ"}] == list(table.rows) def test_upsert_all_conversion(fresh_db): table = fresh_db["table"] table.upsert_all( [{"id": 1, "foo": "bar"}], pk="id", conversions={"foo": "upper(?)"} ) assert [{"id": 1, "foo": "BAR"}] == list(table.rows) def test_update_conversion(fresh_db): table = fresh_db["table"] table.insert({"id": 5, "foo": "bar"}, pk="id") table.update(5, {"foo": "baz"}, conversions={"foo": "upper(?)"}) assert [{"id": 5, "foo": "BAZ"}] == list(table.rows) def test_table_constructor_conversion(fresh_db): table = fresh_db.table("table", conversions={"bar": "upper(?)"}) table.insert({"bar": "baz"}) assert [{"bar": "BAZ"}] == list(table.rows) 
sqlite-utils-3.35.2/tests/test_convert.py000066400000000000000000000105411452131415600205010ustar00rootroot00000000000000from sqlite_utils.db import BadMultiValues import pytest @pytest.mark.parametrize( "columns,fn,expected", ( ( "title", lambda value: value.upper(), {"title": "MIXED CASE", "abstract": "Abstract"}, ), ( ["title", "abstract"], lambda value: value.upper(), {"title": "MIXED CASE", "abstract": "ABSTRACT"}, ), ( "title", lambda value: {"upper": value.upper(), "lower": value.lower()}, { "title": '{"upper": "MIXED CASE", "lower": "mixed case"}', "abstract": "Abstract", }, ), ), ) def test_convert(fresh_db, columns, fn, expected): table = fresh_db["table"] table.insert({"title": "Mixed Case", "abstract": "Abstract"}) table.convert(columns, fn) assert list(table.rows) == [expected] @pytest.mark.parametrize( "where,where_args", (("id > 1", None), ("id > :id", {"id": 1}), ("id > ?", [1])) ) def test_convert_where(fresh_db, where, where_args): table = fresh_db["table"] table.insert_all( [ {"id": 1, "title": "One"}, {"id": 2, "title": "Two"}, ], pk="id", ) table.convert( "title", lambda value: value.upper(), where=where, where_args=where_args ) assert list(table.rows) == [{"id": 1, "title": "One"}, {"id": 2, "title": "TWO"}] def test_convert_skip_false(fresh_db): table = fresh_db["table"] table.insert_all([{"x": 0}, {"x": 1}]) assert table.get(1)["x"] == 0 assert table.get(2)["x"] == 1 table.convert("x", lambda x: x + 1, skip_false=False) assert table.get(1)["x"] == 1 assert table.get(2)["x"] == 2 @pytest.mark.parametrize( "drop,expected", ( (False, {"title": "Mixed Case", "other": "MIXED CASE"}), (True, {"other": "MIXED CASE"}), ), ) def test_convert_output(fresh_db, drop, expected): table = fresh_db["table"] table.insert({"title": "Mixed Case"}) table.convert("title", lambda v: v.upper(), output="other", drop=drop) assert list(table.rows) == [expected] def test_convert_output_multiple_column_error(fresh_db): table = fresh_db["table"] with pytest.raises(AssertionError) as excinfo: table.convert(["title", "other"], lambda v: v, output="out") assert "output= can only be used with a single column" in str(excinfo.value) @pytest.mark.parametrize( "type,expected", ( (int, {"other": 123}), (float, {"other": 123.0}), ), ) def test_convert_output_type(fresh_db, type, expected): table = fresh_db["table"] table.insert({"number": "123"}) table.convert("number", lambda v: v, output="other", output_type=type, drop=True) assert list(table.rows) == [expected] def test_convert_multi(fresh_db): table = fresh_db["table"] table.insert({"title": "Mixed Case"}) table.convert( "title", lambda v: { "upper": v.upper(), "lower": v.lower(), "both": { "upper": v.upper(), "lower": v.lower(), }, }, multi=True, ) assert list(table.rows) == [ { "title": "Mixed Case", "upper": "MIXED CASE", "lower": "mixed case", "both": '{"upper": "MIXED CASE", "lower": "mixed case"}', } ] def test_convert_multi_where(fresh_db): table = fresh_db["table"] table.insert_all( [ {"id": 1, "title": "One"}, {"id": 2, "title": "Two"}, ], pk="id", ) table.convert( "title", lambda v: {"upper": v.upper(), "lower": v.lower()}, multi=True, where="id > ?", where_args=[1], ) assert list(table.rows) == [ {"id": 1, "lower": None, "title": "One", "upper": None}, {"id": 2, "lower": "two", "title": "Two", "upper": "TWO"}, ] def test_convert_multi_exception(fresh_db): table = fresh_db["table"] table.insert({"title": "Mixed Case"}) with pytest.raises(BadMultiValues): table.convert("title", lambda v: v.upper(), multi=True) def 
test_convert_repeated(fresh_db): table = fresh_db["table"] col = "num" table.insert({col: 1}) table.convert(col, lambda x: x * 2) table.convert(col, lambda _x: 0) assert table.get(1) == {col: 0} sqlite-utils-3.35.2/tests/test_create.py000066400000000000000000001253531452131415600202740ustar00rootroot00000000000000from sqlite_utils.db import ( Index, Database, DescIndex, AlterError, NoObviousTable, OperationalError, ForeignKey, Table, View, ) from sqlite_utils.utils import hash_record, sqlite3 import collections import datetime import decimal import json import pathlib import pytest import uuid try: import pandas as pd # type: ignore except ImportError: pd = None # type: ignore def test_create_table(fresh_db): assert [] == fresh_db.table_names() table = fresh_db.create_table( "test_table", { "text_col": str, "float_col": float, "int_col": int, "bool_col": bool, "bytes_col": bytes, "datetime_col": datetime.datetime, }, ) assert ["test_table"] == fresh_db.table_names() assert [ {"name": "text_col", "type": "TEXT"}, {"name": "float_col", "type": "FLOAT"}, {"name": "int_col", "type": "INTEGER"}, {"name": "bool_col", "type": "INTEGER"}, {"name": "bytes_col", "type": "BLOB"}, {"name": "datetime_col", "type": "TEXT"}, ] == [{"name": col.name, "type": col.type} for col in table.columns] assert ( "CREATE TABLE [test_table] (\n" " [text_col] TEXT,\n" " [float_col] FLOAT,\n" " [int_col] INTEGER,\n" " [bool_col] INTEGER,\n" " [bytes_col] BLOB,\n" " [datetime_col] TEXT\n" ")" ) == table.schema def test_create_table_compound_primary_key(fresh_db): table = fresh_db.create_table( "test_table", {"id1": str, "id2": str, "value": int}, pk=("id1", "id2") ) assert ( "CREATE TABLE [test_table] (\n" " [id1] TEXT,\n" " [id2] TEXT,\n" " [value] INTEGER,\n" " PRIMARY KEY ([id1], [id2])\n" ")" ) == table.schema assert ["id1", "id2"] == table.pks @pytest.mark.parametrize("pk", ("id", ["id"])) def test_create_table_with_single_primary_key(fresh_db, pk): fresh_db["foo"].insert({"id": 1}, pk=pk) assert ( fresh_db["foo"].schema == "CREATE TABLE [foo] (\n [id] INTEGER PRIMARY KEY\n)" ) def test_create_table_with_invalid_column_characters(fresh_db): with pytest.raises(AssertionError): fresh_db.create_table("players", {"name[foo]": str}) def test_create_table_with_defaults(fresh_db): table = fresh_db.create_table( "players", {"name": str, "score": int}, defaults={"score": 1, "name": "bob''bob"}, ) assert ["players"] == fresh_db.table_names() assert [{"name": "name", "type": "TEXT"}, {"name": "score", "type": "INTEGER"}] == [ {"name": col.name, "type": col.type} for col in table.columns ] assert ( "CREATE TABLE [players] (\n [name] TEXT DEFAULT 'bob''''bob',\n [score] INTEGER DEFAULT 1\n)" ) == table.schema def test_create_table_with_bad_not_null(fresh_db): with pytest.raises(AssertionError): fresh_db.create_table( "players", {"name": str, "score": int}, not_null={"mouse"} ) def test_create_table_with_not_null(fresh_db): table = fresh_db.create_table( "players", {"name": str, "score": int}, not_null={"name", "score"}, defaults={"score": 3}, ) assert ["players"] == fresh_db.table_names() assert [{"name": "name", "type": "TEXT"}, {"name": "score", "type": "INTEGER"}] == [ {"name": col.name, "type": col.type} for col in table.columns ] assert ( "CREATE TABLE [players] (\n [name] TEXT NOT NULL,\n [score] INTEGER NOT NULL DEFAULT 3\n)" ) == table.schema @pytest.mark.parametrize( "example,expected_columns", ( ( {"name": "Ravi", "age": 63}, [{"name": "name", "type": "TEXT"}, {"name": "age", "type": "INTEGER"}], ), ( {"create": 
"Reserved word", "table": "Another"}, [{"name": "create", "type": "TEXT"}, {"name": "table", "type": "TEXT"}], ), ({"day": datetime.time(11, 0)}, [{"name": "day", "type": "TEXT"}]), ({"decimal": decimal.Decimal("1.2")}, [{"name": "decimal", "type": "FLOAT"}]), ( {"memoryview": memoryview(b"hello")}, [{"name": "memoryview", "type": "BLOB"}], ), ({"uuid": uuid.uuid4()}, [{"name": "uuid", "type": "TEXT"}]), ({"foo[bar]": 1}, [{"name": "foo_bar_", "type": "INTEGER"}]), ( {"timedelta": datetime.timedelta(hours=1)}, [{"name": "timedelta", "type": "TEXT"}], ), ), ) def test_create_table_from_example(fresh_db, example, expected_columns): people_table = fresh_db["people"] assert people_table.last_rowid is None assert people_table.last_pk is None people_table.insert(example) assert people_table.last_rowid == 1 assert people_table.last_pk == 1 assert ["people"] == fresh_db.table_names() assert expected_columns == [ {"name": col.name, "type": col.type} for col in fresh_db["people"].columns ] def test_create_table_from_example_with_compound_primary_keys(fresh_db): record = {"name": "Zhang", "group": "staff", "employee_id": 2} table = fresh_db["people"].insert(record, pk=("group", "employee_id")) assert ["group", "employee_id"] == table.pks assert record == table.get(("staff", 2)) @pytest.mark.parametrize( "method_name", ("insert", "upsert", "insert_all", "upsert_all") ) def test_create_table_with_custom_columns(fresh_db, method_name): table = fresh_db["dogs"] method = getattr(table, method_name) record = {"id": 1, "name": "Cleo", "age": "5"} if method_name.endswith("_all"): record = [record] method(record, pk="id", columns={"age": int, "weight": float}) assert ["dogs"] == fresh_db.table_names() expected_columns = [ {"name": "id", "type": "INTEGER"}, {"name": "name", "type": "TEXT"}, {"name": "age", "type": "INTEGER"}, {"name": "weight", "type": "FLOAT"}, ] assert expected_columns == [ {"name": col.name, "type": col.type} for col in table.columns ] assert [{"id": 1, "name": "Cleo", "age": 5, "weight": None}] == list(table.rows) @pytest.mark.parametrize("use_table_factory", [True, False]) def test_create_table_column_order(fresh_db, use_table_factory): row = collections.OrderedDict( ( ("zzz", "third"), ("abc", "first"), ("ccc", "second"), ("bbb", "second-to-last"), ("aaa", "last"), ) ) column_order = ("abc", "ccc", "zzz") if use_table_factory: fresh_db.table("table", column_order=column_order).insert(row) else: fresh_db["table"].insert(row, column_order=column_order) assert [ {"name": "abc", "type": "TEXT"}, {"name": "ccc", "type": "TEXT"}, {"name": "zzz", "type": "TEXT"}, {"name": "bbb", "type": "TEXT"}, {"name": "aaa", "type": "TEXT"}, ] == [{"name": col.name, "type": col.type} for col in fresh_db["table"].columns] @pytest.mark.parametrize( "foreign_key_specification,expected_exception", ( # You can specify triples, pairs, or a list of columns ((("one_id", "one", "id"), ("two_id", "two", "id")), False), ((("one_id", "one"), ("two_id", "two")), False), (("one_id", "two_id"), False), # You can also specify ForeignKey tuples: ( ( ForeignKey("m2m", "one_id", "one", "id"), ForeignKey("m2m", "two_id", "two", "id"), ), False, ), # If you specify a column that doesn't point to a table, you get an error: (("one_id", "two_id", "three_id"), NoObviousTable), # Tuples of the wrong length get an error: ((("one_id", "one", "id", "five"), ("two_id", "two", "id")), AssertionError), # Likewise a bad column: ((("one_id", "one", "id2"),), AlterError), # Or a list of dicts (({"one_id": "one"},), AssertionError), ), ) 
@pytest.mark.parametrize("use_table_factory", [True, False]) def test_create_table_works_for_m2m_with_only_foreign_keys( fresh_db, foreign_key_specification, expected_exception, use_table_factory ): if use_table_factory: fresh_db.table("one", pk="id").insert({"id": 1}) fresh_db.table("two", pk="id").insert({"id": 1}) else: fresh_db["one"].insert({"id": 1}, pk="id") fresh_db["two"].insert({"id": 1}, pk="id") row = {"one_id": 1, "two_id": 1} def do_it(): if use_table_factory: fresh_db.table("m2m", foreign_keys=foreign_key_specification).insert(row) else: fresh_db["m2m"].insert(row, foreign_keys=foreign_key_specification) if expected_exception: with pytest.raises(expected_exception): do_it() return else: do_it() assert [ {"name": "one_id", "type": "INTEGER"}, {"name": "two_id", "type": "INTEGER"}, ] == [{"name": col.name, "type": col.type} for col in fresh_db["m2m"].columns] assert sorted( [ {"column": "one_id", "other_table": "one", "other_column": "id"}, {"column": "two_id", "other_table": "two", "other_column": "id"}, ], key=lambda s: repr(s), ) == sorted( [ { "column": fk.column, "other_table": fk.other_table, "other_column": fk.other_column, } for fk in fresh_db["m2m"].foreign_keys ], key=lambda s: repr(s), ) def test_self_referential_foreign_key(fresh_db): assert [] == fresh_db.table_names() table = fresh_db.create_table( "test_table", columns={ "id": int, "ref": int, }, pk="id", foreign_keys=(("ref", "test_table", "id"),), ) assert ( "CREATE TABLE [test_table] (\n" " [id] INTEGER PRIMARY KEY,\n" " [ref] INTEGER REFERENCES [test_table]([id])\n" ")" ) == table.schema def test_create_error_if_invalid_foreign_keys(fresh_db): with pytest.raises(AlterError): fresh_db["one"].insert( {"id": 1, "ref_id": 3}, pk="id", foreign_keys=(("ref_id", "bad_table", "bad_column"),), ) def test_create_error_if_invalid_self_referential_foreign_keys(fresh_db): with pytest.raises(AlterError) as ex: fresh_db["one"].insert( {"id": 1, "ref_id": 3}, pk="id", foreign_keys=(("ref_id", "one", "bad_column"),), ) assert ex.value.args == ("No such column: one.bad_column",) @pytest.mark.parametrize( "col_name,col_type,not_null_default,expected_schema", ( ( "nickname", str, None, "CREATE TABLE [dogs] (\n [name] TEXT\n, [nickname] TEXT)", ), ( "dob", datetime.date, None, "CREATE TABLE [dogs] (\n [name] TEXT\n, [dob] TEXT)", ), ("age", int, None, "CREATE TABLE [dogs] (\n [name] TEXT\n, [age] INTEGER)"), ( "weight", float, None, "CREATE TABLE [dogs] (\n [name] TEXT\n, [weight] FLOAT)", ), ("text", "TEXT", None, "CREATE TABLE [dogs] (\n [name] TEXT\n, [text] TEXT)"), ( "integer", "INTEGER", None, "CREATE TABLE [dogs] (\n [name] TEXT\n, [integer] INTEGER)", ), ( "float", "FLOAT", None, "CREATE TABLE [dogs] (\n [name] TEXT\n, [float] FLOAT)", ), ("blob", "blob", None, "CREATE TABLE [dogs] (\n [name] TEXT\n, [blob] BLOB)"), ( "default_str", None, None, "CREATE TABLE [dogs] (\n [name] TEXT\n, [default_str] TEXT)", ), ( "nickname", str, "", "CREATE TABLE [dogs] (\n [name] TEXT\n, [nickname] TEXT NOT NULL DEFAULT '')", ), ( "nickname", str, "dawg's dawg", "CREATE TABLE [dogs] (\n [name] TEXT\n, [nickname] TEXT NOT NULL DEFAULT 'dawg''s dawg')", ), ), ) def test_add_column(fresh_db, col_name, col_type, not_null_default, expected_schema): fresh_db.create_table("dogs", {"name": str}) assert fresh_db["dogs"].schema == "CREATE TABLE [dogs] (\n [name] TEXT\n)" fresh_db["dogs"].add_column(col_name, col_type, not_null_default=not_null_default) assert fresh_db["dogs"].schema == expected_schema def test_add_foreign_key(fresh_db): 
fresh_db["authors"].insert_all( [{"id": 1, "name": "Sally"}, {"id": 2, "name": "Asheesh"}], pk="id" ) fresh_db["books"].insert_all( [ {"title": "Hedgehogs of the world", "author_id": 1}, {"title": "How to train your wolf", "author_id": 2}, ] ) assert [] == fresh_db["books"].foreign_keys t = fresh_db["books"].add_foreign_key("author_id", "authors", "id") # Ensure it returned self: assert isinstance(t, Table) and t.name == "books" assert [ ForeignKey( table="books", column="author_id", other_table="authors", other_column="id" ) ] == fresh_db["books"].foreign_keys def test_add_foreign_key_if_column_contains_space(fresh_db): fresh_db["authors"].insert_all([{"id": 1, "name": "Sally"}], pk="id") fresh_db["books"].insert_all( [ {"title": "Hedgehogs of the world", "author id": 1}, ] ) fresh_db["books"].add_foreign_key("author id", "authors", "id") assert fresh_db["books"].foreign_keys == [ ForeignKey( table="books", column="author id", other_table="authors", other_column="id" ) ] def test_add_foreign_key_error_if_column_does_not_exist(fresh_db): fresh_db["books"].insert( {"id": 1, "title": "Hedgehogs of the world", "author_id": 1} ) with pytest.raises(AlterError): fresh_db["books"].add_foreign_key("author2_id", "books", "id") def test_add_foreign_key_error_if_other_table_does_not_exist(fresh_db): fresh_db["books"].insert({"title": "Hedgehogs of the world", "author_id": 1}) with pytest.raises(AlterError): fresh_db["books"].add_foreign_key("author_id", "authors", "id") def test_add_foreign_key_error_if_already_exists(fresh_db): fresh_db["books"].insert({"title": "Hedgehogs of the world", "author_id": 1}) fresh_db["authors"].insert({"id": 1, "name": "Sally"}, pk="id") fresh_db["books"].add_foreign_key("author_id", "authors", "id") with pytest.raises(AlterError) as ex: fresh_db["books"].add_foreign_key("author_id", "authors", "id") assert "Foreign key already exists for author_id => authors.id" == ex.value.args[0] def test_add_foreign_key_no_error_if_exists_and_ignore_true(fresh_db): fresh_db["books"].insert({"title": "Hedgehogs of the world", "author_id": 1}) fresh_db["authors"].insert({"id": 1, "name": "Sally"}, pk="id") fresh_db["books"].add_foreign_key("author_id", "authors", "id") fresh_db["books"].add_foreign_key("author_id", "authors", "id", ignore=True) def test_add_foreign_keys(fresh_db): fresh_db["authors"].insert_all( [{"id": 1, "name": "Sally"}, {"id": 2, "name": "Asheesh"}], pk="id" ) fresh_db["categories"].insert_all([{"id": 1, "name": "Wildlife"}], pk="id") fresh_db["books"].insert_all( [{"title": "Hedgehogs of the world", "author_id": 1, "category_id": 1}] ) assert [] == fresh_db["books"].foreign_keys fresh_db.add_foreign_keys( [ ("books", "author_id", "authors", "id"), ("books", "category_id", "categories", "id"), ] ) assert [ ForeignKey( table="books", column="author_id", other_table="authors", other_column="id" ), ForeignKey( table="books", column="category_id", other_table="categories", other_column="id", ), ] == sorted(fresh_db["books"].foreign_keys) def test_add_column_foreign_key(fresh_db): fresh_db.create_table("dogs", {"name": str}) fresh_db.create_table("breeds", {"name": str}) fresh_db["dogs"].add_column("breed_id", fk="breeds") assert fresh_db["dogs"].schema == ( 'CREATE TABLE "dogs" (\n' " [name] TEXT,\n" " [breed_id] INTEGER REFERENCES [breeds]([rowid])\n" ")" ) # And again with an explicit primary key column fresh_db.create_table("subbreeds", {"name": str, "primkey": str}, pk="primkey") fresh_db["dogs"].add_column("subbreed_id", fk="subbreeds") assert 
fresh_db["dogs"].schema == ( 'CREATE TABLE "dogs" (\n' " [name] TEXT,\n" " [breed_id] INTEGER REFERENCES [breeds]([rowid]),\n" " [subbreed_id] TEXT REFERENCES [subbreeds]([primkey])\n" ")" ) def test_add_foreign_key_guess_table(fresh_db): fresh_db.create_table("dogs", {"name": str}) fresh_db.create_table("breeds", {"name": str, "id": int}, pk="id") fresh_db["dogs"].add_column("breed_id", int) fresh_db["dogs"].add_foreign_key("breed_id") assert fresh_db["dogs"].schema == ( 'CREATE TABLE "dogs" (\n' " [name] TEXT,\n" " [breed_id] INTEGER REFERENCES [breeds]([id])\n" ")" ) def test_index_foreign_keys(fresh_db): test_add_foreign_key_guess_table(fresh_db) assert [] == fresh_db["dogs"].indexes fresh_db.index_foreign_keys() assert [["breed_id"]] == [i.columns for i in fresh_db["dogs"].indexes] # Calling it a second time should do nothing fresh_db.index_foreign_keys() assert [["breed_id"]] == [i.columns for i in fresh_db["dogs"].indexes] def test_index_foreign_keys_if_index_name_is_already_used(fresh_db): # https://github.com/simonw/sqlite-utils/issues/335 test_add_foreign_key_guess_table(fresh_db) # Add index with a name that will conflict with index_foreign_keys() fresh_db["dogs"].create_index(["name"], index_name="idx_dogs_breed_id") fresh_db.index_foreign_keys() assert {(idx.name, tuple(idx.columns)) for idx in fresh_db["dogs"].indexes} == { ("idx_dogs_breed_id_2", ("breed_id",)), ("idx_dogs_breed_id", ("name",)), } @pytest.mark.parametrize( "extra_data,expected_new_columns", [ ({"species": "squirrels"}, [{"name": "species", "type": "TEXT"}]), ( {"species": "squirrels", "hats": 5}, [{"name": "species", "type": "TEXT"}, {"name": "hats", "type": "INTEGER"}], ), ( {"hats": 5, "rating": 3.5}, [{"name": "hats", "type": "INTEGER"}, {"name": "rating", "type": "FLOAT"}], ), ], ) @pytest.mark.parametrize("use_table_factory", [True, False]) def test_insert_row_alter_table( fresh_db, extra_data, expected_new_columns, use_table_factory ): table = fresh_db["books"] table.insert({"title": "Hedgehogs of the world", "author_id": 1}) assert [ {"name": "title", "type": "TEXT"}, {"name": "author_id", "type": "INTEGER"}, ] == [{"name": col.name, "type": col.type} for col in table.columns] record = {"title": "Squirrels of the world", "author_id": 2} record.update(extra_data) if use_table_factory: fresh_db.table("books", alter=True).insert(record) else: fresh_db["books"].insert(record, alter=True) assert [ {"name": "title", "type": "TEXT"}, {"name": "author_id", "type": "INTEGER"}, ] + expected_new_columns == [ {"name": col.name, "type": col.type} for col in table.columns ] def test_add_missing_columns_case_insensitive(fresh_db): table = fresh_db["foo"] table.insert({"id": 1, "name": "Cleo"}, pk="id") table.add_missing_columns([{"Name": ".", "age": 4}]) assert ( table.schema == "CREATE TABLE [foo] (\n [id] INTEGER PRIMARY KEY,\n [name] TEXT\n, [age] INTEGER)" ) @pytest.mark.parametrize("use_table_factory", [True, False]) def test_insert_replace_rows_alter_table(fresh_db, use_table_factory): first_row = {"id": 1, "title": "Hedgehogs of the world", "author_id": 1} next_rows = [ {"id": 1, "title": "Hedgehogs of the World", "species": "hedgehogs"}, {"id": 2, "title": "Squirrels of the World", "num_species": 200}, { "id": 3, "title": "Badgers of the World", "significant_continents": ["Europe", "North America"], }, ] if use_table_factory: table = fresh_db.table("books", pk="id", alter=True) table.insert(first_row) table.insert_all(next_rows, replace=True) else: table = fresh_db["books"] table.insert(first_row, pk="id") 
table.insert_all(next_rows, alter=True, replace=True) assert { "author_id": int, "id": int, "num_species": int, "significant_continents": str, "species": str, "title": str, } == table.columns_dict assert [ { "author_id": None, "id": 1, "num_species": None, "significant_continents": None, "species": "hedgehogs", "title": "Hedgehogs of the World", }, { "author_id": None, "id": 2, "num_species": 200, "significant_continents": None, "species": None, "title": "Squirrels of the World", }, { "author_id": None, "id": 3, "num_species": None, "significant_continents": '["Europe", "North America"]', "species": None, "title": "Badgers of the World", }, ] == list(table.rows) def test_insert_all_with_extra_columns_in_later_chunks(fresh_db): chunk = [ {"record": "Record 1"}, {"record": "Record 2"}, {"record": "Record 3"}, {"record": "Record 4", "extra": 1}, ] fresh_db["t"].insert_all(chunk, batch_size=2, alter=True) assert list(fresh_db["t"].rows) == [ {"record": "Record 1", "extra": None}, {"record": "Record 2", "extra": None}, {"record": "Record 3", "extra": None}, {"record": "Record 4", "extra": 1}, ] def test_bulk_insert_more_than_999_values(fresh_db): "Inserting 100 items with 11 columns should work" fresh_db["big"].insert_all( ( { "id": i + 1, "c2": 2, "c3": 3, "c4": 4, "c5": 5, "c6": 6, "c7": 7, "c8": 8, "c9": 9, "c10": 10, "c11": 11, } for i in range(100) ), pk="id", ) assert fresh_db["big"].count == 100 @pytest.mark.parametrize( "num_columns,should_error", ((900, False), (999, False), (1000, True)) ) def test_error_if_more_than_999_columns(fresh_db, num_columns, should_error): record = dict([("c{}".format(i), i) for i in range(num_columns)]) if should_error: with pytest.raises(AssertionError): fresh_db["big"].insert(record) else: fresh_db["big"].insert(record) def test_columns_not_in_first_record_should_not_cause_batch_to_be_too_large(fresh_db): # https://github.com/simonw/sqlite-utils/issues/145 # sqlite on homebrew and Debian/Ubuntu etc. is typically compiled with # SQLITE_MAX_VARIABLE_NUMBER set to 250,000, so we need to exceed this value to # trigger the error on these systems. 
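    # Added note (not in the upstream test): with batch_size = 999 the widest
    # record needs extra_columns = 1 + (250000 - 1) // (999 - 1) = 251 columns,
    # so a single 999-row batch would bind roughly 999 * 251 = 250,749
    # parameters, just over the assumed 250,000 limit mentioned above; that is
    # the overflow condition from issue #145 that this test guards against.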
THRESHOLD = 250000 batch_size = 999 extra_columns = 1 + (THRESHOLD - 1) // (batch_size - 1) records = [ {"c0": "first record"}, # one column in first record -> batch size = 999 # fill out the batch with 99 records with enough columns to exceed THRESHOLD *[ dict([("c{}".format(i), j) for i in range(extra_columns)]) for j in range(batch_size - 1) ], ] try: fresh_db["too_many_columns"].insert_all( records, alter=True, batch_size=batch_size ) except sqlite3.OperationalError: raise @pytest.mark.parametrize( "columns,index_name,expected_index", ( ( ["is good dog"], None, Index( seq=0, name="idx_dogs_is good dog", unique=0, origin="c", partial=0, columns=["is good dog"], ), ), ( ["is good dog", "age"], None, Index( seq=0, name="idx_dogs_is good dog_age", unique=0, origin="c", partial=0, columns=["is good dog", "age"], ), ), ( ["age"], "age_index", Index( seq=0, name="age_index", unique=0, origin="c", partial=0, columns=["age"], ), ), ), ) def test_create_index(fresh_db, columns, index_name, expected_index): dogs = fresh_db["dogs"] dogs.insert({"name": "Cleo", "twitter": "cleopaws", "age": 3, "is good dog": True}) assert [] == dogs.indexes dogs.create_index(columns, index_name) assert expected_index == dogs.indexes[0] def test_create_index_unique(fresh_db): dogs = fresh_db["dogs"] dogs.insert({"name": "Cleo", "twitter": "cleopaws", "age": 3, "is_good_dog": True}) assert [] == dogs.indexes dogs.create_index(["name"], unique=True) assert ( Index( seq=0, name="idx_dogs_name", unique=1, origin="c", partial=0, columns=["name"], ) == dogs.indexes[0] ) def test_create_index_if_not_exists(fresh_db): dogs = fresh_db["dogs"] dogs.insert({"name": "Cleo", "twitter": "cleopaws", "age": 3, "is_good_dog": True}) assert [] == dogs.indexes dogs.create_index(["name"]) assert len(dogs.indexes) == 1 with pytest.raises(Exception, match="index idx_dogs_name already exists"): dogs.create_index(["name"]) dogs.create_index(["name"], if_not_exists=True) def test_create_index_desc(fresh_db): dogs = fresh_db["dogs"] dogs.insert({"name": "Cleo", "twitter": "cleopaws", "age": 3, "is good dog": True}) assert [] == dogs.indexes dogs.create_index([DescIndex("age"), "name"]) sql = fresh_db.execute( "select sql from sqlite_master where name='idx_dogs_age_name'" ).fetchone()[0] assert sql == ( "CREATE INDEX [idx_dogs_age_name]\n" " ON [dogs] ([age] desc, [name])" ) def test_create_index_find_unique_name(fresh_db): table = fresh_db["t"] table.insert({"id": 1}) table.create_index(["id"]) # Without find_unique_name should error with pytest.raises(OperationalError, match="index idx_t_id already exists"): table.create_index(["id"]) # With find_unique_name=True it should work table.create_index(["id"], find_unique_name=True) table.create_index(["id"], find_unique_name=True) # Should have three now index_names = {idx.name for idx in table.indexes} assert index_names == {"idx_t_id", "idx_t_id_2", "idx_t_id_3"} def test_create_index_analyze(fresh_db): dogs = fresh_db["dogs"] assert "sqlite_stat1" not in fresh_db.table_names() dogs.insert({"name": "Cleo", "twitter": "cleopaws"}) dogs.create_index(["name"], analyze=True) assert "sqlite_stat1" in fresh_db.table_names() assert list(fresh_db["sqlite_stat1"].rows) == [ {"tbl": "dogs", "idx": "idx_dogs_name", "stat": "1 1"} ] @pytest.mark.parametrize( "data_structure", ( ["list with one item"], ["list with", "two items"], {"dictionary": "simple"}, {"dictionary": {"nested": "complex"}}, collections.OrderedDict( [ ("key1", {"nested": ["cømplex"]}), ("key2", "foo"), ] ), [{"list": "of"}, {"two": 
"dicts"}], ), ) def test_insert_dictionaries_and_lists_as_json(fresh_db, data_structure): fresh_db["test"].insert({"id": 1, "data": data_structure}, pk="id") row = fresh_db.execute("select id, data from test").fetchone() assert row[0] == 1 assert data_structure == json.loads(row[1]) def test_insert_list_nested_unicode(fresh_db): fresh_db["test"].insert( {"id": 1, "data": {"key1": {"nested": ["cømplex"]}}}, pk="id" ) row = fresh_db.execute("select id, data from test").fetchone() assert row[1] == '{"key1": {"nested": ["cømplex"]}}' def test_insert_uuid(fresh_db): uuid4 = uuid.uuid4() fresh_db["test"].insert({"uuid": uuid4}) row = list(fresh_db["test"].rows)[0] assert {"uuid"} == row.keys() assert isinstance(row["uuid"], str) assert row["uuid"] == str(uuid4) def test_insert_memoryview(fresh_db): fresh_db["test"].insert({"data": memoryview(b"hello")}) row = list(fresh_db["test"].rows)[0] assert {"data"} == row.keys() assert isinstance(row["data"], bytes) assert row["data"] == b"hello" def test_insert_thousands_using_generator(fresh_db): fresh_db["test"].insert_all( {"i": i, "word": "word_{}".format(i)} for i in range(10000) ) assert [{"name": "i", "type": "INTEGER"}, {"name": "word", "type": "TEXT"}] == [ {"name": col.name, "type": col.type} for col in fresh_db["test"].columns ] assert fresh_db["test"].count == 10000 def test_insert_thousands_raises_exception_with_extra_columns_after_first_100(fresh_db): # https://github.com/simonw/sqlite-utils/issues/139 with pytest.raises(Exception, match="table test has no column named extra"): fresh_db["test"].insert_all( [{"i": i, "word": "word_{}".format(i)} for i in range(100)] + [{"i": 101, "extra": "This extra column should cause an exception"}], ) def test_insert_thousands_adds_extra_columns_after_first_100_with_alter(fresh_db): # https://github.com/simonw/sqlite-utils/issues/139 fresh_db["test"].insert_all( [{"i": i, "word": "word_{}".format(i)} for i in range(100)] + [{"i": 101, "extra": "Should trigger ALTER"}], alter=True, ) rows = list(fresh_db.query("select * from test where i = 101")) assert rows == [{"i": 101, "word": None, "extra": "Should trigger ALTER"}] def test_insert_ignore(fresh_db): fresh_db["test"].insert({"id": 1, "bar": 2}, pk="id") # Should raise an error if we try this again with pytest.raises(Exception, match="UNIQUE constraint failed"): fresh_db["test"].insert({"id": 1, "bar": 2}, pk="id") # Using ignore=True should cause our insert to be silently ignored fresh_db["test"].insert({"id": 1, "bar": 3}, pk="id", ignore=True) # Only one row, and it should be bar=2, not bar=3 rows = list(fresh_db.query("select * from test")) assert rows == [{"id": 1, "bar": 2}] def test_insert_hash_id(fresh_db): dogs = fresh_db["dogs"] id = dogs.insert({"name": "Cleo", "twitter": "cleopaws"}, hash_id="id").last_pk assert "f501265970505d9825d8d9f590bfab3519fb20b1" == id assert dogs.count == 1 # Insert replacing a second time should not create a new row id2 = dogs.insert( {"name": "Cleo", "twitter": "cleopaws"}, hash_id="id", replace=True ).last_pk assert "f501265970505d9825d8d9f590bfab3519fb20b1" == id2 assert dogs.count == 1 @pytest.mark.parametrize("use_table_factory", [True, False]) def test_insert_hash_id_columns(fresh_db, use_table_factory): if use_table_factory: dogs = fresh_db.table("dogs", hash_id_columns=("name", "twitter")) insert_kwargs = {} else: dogs = fresh_db["dogs"] insert_kwargs = dict(hash_id_columns=("name", "twitter")) id = dogs.insert( {"name": "Cleo", "twitter": "cleopaws", "age": 5}, **insert_kwargs, ).last_pk expected_hash = 
hash_record({"name": "Cleo", "twitter": "cleopaws"}) assert id == expected_hash assert dogs.count == 1 # Insert replacing a second time should not create a new row id2 = dogs.insert( {"name": "Cleo", "twitter": "cleopaws", "age": 6}, **insert_kwargs, replace=True, ).last_pk assert id2 == expected_hash assert dogs.count == 1 def test_vacuum(fresh_db): fresh_db["data"].insert({"foo": "foo", "bar": "bar"}) fresh_db.vacuum() def test_works_with_pathlib_path(tmpdir): path = pathlib.Path(tmpdir / "test.db") db = Database(path) db["demo"].insert_all([{"foo": 1}]) assert db["demo"].count == 1 @pytest.mark.skipif(pd is None, reason="pandas and numpy are not installed") def test_create_table_numpy(fresh_db): df = pd.DataFrame({"col 1": range(3), "col 2": range(3)}) fresh_db["pandas"].insert_all(df.to_dict(orient="records")) assert [ {"col 1": 0, "col 2": 0}, {"col 1": 1, "col 2": 1}, {"col 1": 2, "col 2": 2}, ] == list(fresh_db["pandas"].rows) # Now try all the different types df = pd.DataFrame( { "np.int8": [-8], "np.int16": [-16], "np.int32": [-32], "np.int64": [-64], "np.uint8": [8], "np.uint16": [16], "np.uint32": [32], "np.uint64": [64], "np.float16": [16.5], "np.float32": [32.5], "np.float64": [64.5], } ) df = df.astype( { "np.int8": "int8", "np.int16": "int16", "np.int32": "int32", "np.int64": "int64", "np.uint8": "uint8", "np.uint16": "uint16", "np.uint32": "uint32", "np.uint64": "uint64", "np.float16": "float16", "np.float32": "float32", "np.float64": "float64", } ) assert [ "int8", "int16", "int32", "int64", "uint8", "uint16", "uint32", "uint64", "float16", "float32", "float64", ] == [str(t) for t in df.dtypes] fresh_db["types"].insert_all(df.to_dict(orient="records")) assert [ { "np.float16": 16.5, "np.float32": 32.5, "np.float64": 64.5, "np.int16": -16, "np.int32": -32, "np.int64": -64, "np.int8": -8, "np.uint16": 16, "np.uint32": 32, "np.uint64": 64, "np.uint8": 8, } ] == list(fresh_db["types"].rows) def test_cannot_provide_both_filename_and_memory(): with pytest.raises( AssertionError, match="Either specify a filename_or_conn or pass memory=True" ): Database("/tmp/foo.db", memory=True) def test_creates_id_column(fresh_db): last_pk = fresh_db.table("cats", pk="id").insert({"name": "barry"}).last_pk assert [{"name": "barry", "id": last_pk}] == list(fresh_db["cats"].rows) def test_drop(fresh_db): fresh_db["t"].insert({"foo": 1}) assert ["t"] == fresh_db.table_names() assert None is fresh_db["t"].drop() assert [] == fresh_db.table_names() def test_drop_view(fresh_db): fresh_db.create_view("foo_view", "select 1") assert ["foo_view"] == fresh_db.view_names() assert None is fresh_db["foo_view"].drop() assert [] == fresh_db.view_names() def test_drop_ignore(fresh_db): with pytest.raises(sqlite3.OperationalError): fresh_db["does_not_exist"].drop() fresh_db["does_not_exist"].drop(ignore=True) # Testing view is harder, we need to create it in order # to get a View object, then drop it twice fresh_db.create_view("foo_view", "select 1") view = fresh_db["foo_view"] assert isinstance(view, View) view.drop() with pytest.raises(sqlite3.OperationalError): view.drop() view.drop(ignore=True) def test_insert_all_empty_list(fresh_db): fresh_db["t"].insert({"foo": 1}) assert fresh_db["t"].count == 1 fresh_db["t"].insert_all([]) assert fresh_db["t"].count == 1 fresh_db["t"].insert_all([], replace=True) assert fresh_db["t"].count == 1 def test_insert_all_single_column(fresh_db): table = fresh_db["table"] table.insert_all([{"name": "Cleo"}], pk="name") assert [{"name": "Cleo"}] == list(table.rows) assert 
table.pks == ["name"] @pytest.mark.parametrize("method_name", ("insert_all", "upsert_all")) def test_insert_all_analyze(fresh_db, method_name): table = fresh_db["table"] table.insert_all([{"id": 1, "name": "Cleo"}], pk="id") assert "sqlite_stat1" not in fresh_db.table_names() table.create_index(["name"], analyze=True) assert list(fresh_db["sqlite_stat1"].rows) == [ {"tbl": "table", "idx": "idx_table_name", "stat": "1 1"} ] method = getattr(table, method_name) method([{"id": 2, "name": "Suna"}], pk="id", analyze=True) assert "sqlite_stat1" in fresh_db.table_names() assert list(fresh_db["sqlite_stat1"].rows) == [ {"tbl": "table", "idx": "idx_table_name", "stat": "2 1"} ] def test_create_with_a_null_column(fresh_db): record = {"name": "Name", "description": None} fresh_db["t"].insert(record) assert [record] == list(fresh_db["t"].rows) def test_create_with_nested_bytes(fresh_db): record = {"id": 1, "data": {"foo": b"bytes"}} fresh_db["t"].insert(record) assert [{"id": 1, "data": '{"foo": "b\'bytes\'"}'}] == list(fresh_db["t"].rows) @pytest.mark.parametrize( "input,expected", [("hello", "'hello'"), ("hello'there'", "'hello''there'''")] ) def test_quote(fresh_db, input, expected): assert fresh_db.quote(input) == expected @pytest.mark.parametrize( "columns,expected_sql_middle", ( ( {"id": int}, "[id] INTEGER", ), ( {"col": dict}, "[col] TEXT", ), ( {"col": tuple}, "[col] TEXT", ), ( {"col": list}, "[col] TEXT", ), ), ) def test_create_table_sql(fresh_db, columns, expected_sql_middle): sql = fresh_db.create_table_sql("t", columns) middle = sql.split("(")[1].split(")")[0].strip() assert middle == expected_sql_middle def test_create(fresh_db): fresh_db["t"].create( { "id": int, "text": str, "float": float, "integer": int, "bytes": bytes, }, pk="id", column_order=("id", "float"), not_null=("float", "integer"), defaults={"integer": 0}, ) assert fresh_db["t"].schema == ( "CREATE TABLE [t] (\n" " [id] INTEGER PRIMARY KEY,\n" " [float] FLOAT NOT NULL,\n" " [text] TEXT,\n" " [integer] INTEGER NOT NULL DEFAULT 0,\n" " [bytes] BLOB\n" ")" ) def test_create_if_not_exists(fresh_db): fresh_db["t"].create({"id": int}) # This should error with pytest.raises(sqlite3.OperationalError): fresh_db["t"].create({"id": int}) # This should not fresh_db["t"].create({"id": int}, if_not_exists=True) def test_create_if_no_columns(fresh_db): with pytest.raises(AssertionError) as error: fresh_db["t"].create({}) assert error.value.args[0] == "Tables must have at least one column" def test_create_ignore(fresh_db): fresh_db["t"].create({"id": int}) # This should error with pytest.raises(sqlite3.OperationalError): fresh_db["t"].create({"id": int}) # This should not fresh_db["t"].create({"id": int}, ignore=True) def test_create_replace(fresh_db): fresh_db["t"].create({"id": int}) # This should error with pytest.raises(sqlite3.OperationalError): fresh_db["t"].create({"id": int}) # This should not fresh_db["t"].create({"name": str}, replace=True) assert fresh_db["t"].schema == ("CREATE TABLE [t] (\n" " [name] TEXT\n" ")") @pytest.mark.parametrize( "cols,kwargs,expected_schema,should_transform", ( # Change nothing ( {"id": int, "name": str}, {"pk": "id"}, "CREATE TABLE [demo] (\n [id] INTEGER PRIMARY KEY,\n [name] TEXT\n)", False, ), # Drop name column, remove primary key ({"id": int}, {}, 'CREATE TABLE "demo" (\n [id] INTEGER\n)', True), # Add a new column ( {"id": int, "name": str, "age": int}, {"pk": "id"}, 'CREATE TABLE "demo" (\n [id] INTEGER PRIMARY KEY,\n [name] TEXT,\n [age] INTEGER\n)', True, ), # Change a column type ( 
{"id": int, "name": bytes}, {"pk": "id"}, 'CREATE TABLE "demo" (\n [id] INTEGER PRIMARY KEY,\n [name] BLOB\n)', True, ), # Change the primary key ( {"id": int, "name": str}, {"pk": "name"}, 'CREATE TABLE "demo" (\n [id] INTEGER,\n [name] TEXT PRIMARY KEY\n)', True, ), # Change in column order ( {"id": int, "name": str}, {"pk": "id", "column_order": ["name"]}, 'CREATE TABLE "demo" (\n [name] TEXT,\n [id] INTEGER PRIMARY KEY\n)', True, ), # Same column order is ignored ( {"id": int, "name": str}, {"pk": "id", "column_order": ["id", "name"]}, "CREATE TABLE [demo] (\n [id] INTEGER PRIMARY KEY,\n [name] TEXT\n)", False, ), # Change not null ( {"id": int, "name": str}, {"pk": "id", "not_null": {"name"}}, 'CREATE TABLE "demo" (\n [id] INTEGER PRIMARY KEY,\n [name] TEXT NOT NULL\n)', True, ), # Change default values ( {"id": int, "name": str}, {"pk": "id", "defaults": {"id": 0, "name": "Bob"}}, "CREATE TABLE \"demo\" (\n [id] INTEGER PRIMARY KEY DEFAULT 0,\n [name] TEXT DEFAULT 'Bob'\n)", True, ), ), ) def test_create_transform(fresh_db, cols, kwargs, expected_schema, should_transform): fresh_db.create_table("demo", {"id": int, "name": str}, pk="id") fresh_db["demo"].insert({"id": 1, "name": "Cleo"}) traces = [] with fresh_db.tracer(lambda sql, parameters: traces.append((sql, parameters))): fresh_db["demo"].create(cols, **kwargs, transform=True) at_least_one_create_table = any(sql.startswith("CREATE TABLE") for sql, _ in traces) assert should_transform == at_least_one_create_table new_schema = fresh_db["demo"].schema assert new_schema == expected_schema, repr(new_schema) assert fresh_db["demo"].count == 1 def test_rename_table(fresh_db): fresh_db["t"].insert({"foo": "bar"}) assert ["t"] == fresh_db.table_names() fresh_db.rename_table("t", "renamed") assert ["renamed"] == fresh_db.table_names() assert [{"foo": "bar"}] == list(fresh_db["renamed"].rows) # Should error if table does not exist: with pytest.raises(sqlite3.OperationalError): fresh_db.rename_table("does_not_exist", "renamed") sqlite-utils-3.35.2/tests/test_create_view.py000066400000000000000000000026331452131415600213210ustar00rootroot00000000000000import pytest from sqlite_utils.utils import OperationalError def test_create_view(fresh_db): fresh_db.create_view("bar", "select 1 + 1") rows = fresh_db.execute("select * from bar").fetchall() assert [(2,)] == rows def test_create_view_error(fresh_db): fresh_db.create_view("bar", "select 1 + 1") with pytest.raises(OperationalError): fresh_db.create_view("bar", "select 1 + 2") def test_create_view_only_arrow_one_param(fresh_db): with pytest.raises(AssertionError): fresh_db.create_view("bar", "select 1 + 2", ignore=True, replace=True) def test_create_view_ignore(fresh_db): fresh_db.create_view("bar", "select 1 + 1").create_view( "bar", "select 1 + 2", ignore=True ) rows = fresh_db.execute("select * from bar").fetchall() assert [(2,)] == rows def test_create_view_replace(fresh_db): fresh_db.create_view("bar", "select 1 + 1").create_view( "bar", "select 1 + 2", replace=True ) rows = fresh_db.execute("select * from bar").fetchall() assert [(3,)] == rows def test_create_view_replace_with_same_does_nothing(fresh_db): fresh_db.create_view("bar", "select 1 + 1") initial_version = fresh_db.execute("PRAGMA schema_version").fetchone()[0] fresh_db.create_view("bar", "select 1 + 1", replace=True) after_version = fresh_db.execute("PRAGMA schema_version").fetchone()[0] assert after_version == initial_version 
sqlite-utils-3.35.2/tests/test_default_value.py000066400000000000000000000026511452131415600216440ustar00rootroot00000000000000import pytest EXAMPLES = [ ("TEXT DEFAULT 'foo'", "'foo'", "'foo'"), ("TEXT DEFAULT 'foo)'", "'foo)'", "'foo)'"), ("INTEGER DEFAULT '1'", "'1'", "'1'"), ("INTEGER DEFAULT 1", "1", "'1'"), ("INTEGER DEFAULT (1)", "1", "'1'"), # Expressions ( "TEXT DEFAULT (STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW'))", "STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')", "(STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW'))", ), # Special values ("TEXT DEFAULT CURRENT_TIME", "CURRENT_TIME", "CURRENT_TIME"), ("TEXT DEFAULT CURRENT_DATE", "CURRENT_DATE", "CURRENT_DATE"), ("TEXT DEFAULT CURRENT_TIMESTAMP", "CURRENT_TIMESTAMP", "CURRENT_TIMESTAMP"), ("TEXT DEFAULT current_timestamp", "current_timestamp", "current_timestamp"), ("TEXT DEFAULT (CURRENT_TIMESTAMP)", "CURRENT_TIMESTAMP", "CURRENT_TIMESTAMP"), # Strings ("TEXT DEFAULT 'CURRENT_TIMESTAMP'", "'CURRENT_TIMESTAMP'", "'CURRENT_TIMESTAMP'"), ('TEXT DEFAULT "CURRENT_TIMESTAMP"', '"CURRENT_TIMESTAMP"', '"CURRENT_TIMESTAMP"'), ] @pytest.mark.parametrize("column_def,initial_value,expected_value", EXAMPLES) def test_quote_default_value(fresh_db, column_def, initial_value, expected_value): fresh_db.execute("create table foo (col {})".format(column_def)) assert initial_value == fresh_db["foo"].columns[0].default_value assert expected_value == fresh_db.quote_default_value( fresh_db["foo"].columns[0].default_value ) sqlite-utils-3.35.2/tests/test_delete.py000066400000000000000000000025731452131415600202710ustar00rootroot00000000000000def test_delete_rowid_table(fresh_db): table = fresh_db["table"] table.insert({"foo": 1}).last_pk rowid = table.insert({"foo": 2}).last_pk table.delete(rowid) assert [{"foo": 1}] == list(table.rows) def test_delete_pk_table(fresh_db): table = fresh_db["table"] table.insert({"id": 1}, pk="id") table.insert({"id": 2}, pk="id") table.delete(1) assert [{"id": 2}] == list(table.rows) def test_delete_where(fresh_db): table = fresh_db["table"] for i in range(1, 11): table.insert({"id": i}, pk="id") assert table.count == 10 table.delete_where("id > ?", [5]) assert table.count == 5 def test_delete_where_all(fresh_db): table = fresh_db["table"] for i in range(1, 11): table.insert({"id": i}, pk="id") assert table.count == 10 table.delete_where() assert table.count == 0 def test_delete_where_analyze(fresh_db): table = fresh_db["table"] table.insert_all(({"id": i, "i": i} for i in range(10)), pk="id") table.create_index(["i"], analyze=True) assert "sqlite_stat1" in fresh_db.table_names() assert list(fresh_db["sqlite_stat1"].rows) == [ {"tbl": "table", "idx": "idx_table_i", "stat": "10 1"} ] table.delete_where("id > ?", [5], analyze=True) assert list(fresh_db["sqlite_stat1"].rows) == [ {"tbl": "table", "idx": "idx_table_i", "stat": "6 1"} ] sqlite-utils-3.35.2/tests/test_docs.py000066400000000000000000000031751452131415600177560ustar00rootroot00000000000000from click.testing import CliRunner from sqlite_utils import cli, recipes from pathlib import Path import pytest import re docs_path = Path(__file__).parent.parent / "docs" commands_re = re.compile(r"(?:\$ | )sqlite-utils (\S+)") recipes_re = re.compile(r"r\.(\w+)\(") @pytest.fixture(scope="session") def documented_commands(): rst = "" for doc in ("cli.rst", "plugins.rst"): rst += (docs_path / doc).read_text() return { command for command in commands_re.findall(rst) if "." 
not in command and ":" not in command } @pytest.fixture(scope="session") def documented_recipes(): rst = (docs_path / "cli.rst").read_text() return set(recipes_re.findall(rst)) @pytest.mark.parametrize("command", cli.cli.commands.keys()) def test_commands_are_documented(documented_commands, command): assert command in documented_commands @pytest.mark.parametrize("command", cli.cli.commands.values()) def test_commands_have_help(command): assert command.help, "{} is missing its help".format(command) def test_convert_help(): result = CliRunner().invoke(cli.cli, ["convert", "--help"]) assert result.exit_code == 0 for expected in ( "r.jsonsplit(value, ", "r.parsedate(value, ", "r.parsedatetime(value, ", ): assert expected in result.output @pytest.mark.parametrize( "recipe", [ n for n in dir(recipes) if not n.startswith("_") and n not in ("json", "parser") and callable(getattr(recipes, n)) ], ) def test_recipes_are_documented(documented_recipes, recipe): assert recipe in documented_recipes sqlite-utils-3.35.2/tests/test_duplicate.py000066400000000000000000000023531452131415600207750ustar00rootroot00000000000000from sqlite_utils.db import NoTable import datetime import pytest def test_duplicate(fresh_db): # Create table using native Sqlite statement: fresh_db.execute( """CREATE TABLE [table1] ( [text_col] TEXT, [real_col] REAL, [int_col] INTEGER, [bool_col] INTEGER, [datetime_col] TEXT)""" ) # Insert one row of mock data: dt = datetime.datetime.now() data = { "text_col": "Cleo", "real_col": 3.14, "int_col": -255, "bool_col": True, "datetime_col": str(dt), } table1 = fresh_db["table1"] row_id = table1.insert(data).last_rowid # Duplicate table: table2 = table1.duplicate("table2") # Ensure data integrity: assert data == table2.get(row_id) # Ensure schema integrity: assert [ {"name": "text_col", "type": "TEXT"}, {"name": "real_col", "type": "REAL"}, {"name": "int_col", "type": "INT"}, {"name": "bool_col", "type": "INT"}, {"name": "datetime_col", "type": "TEXT"}, ] == [{"name": col.name, "type": col.type} for col in table2.columns] def test_duplicate_fails_if_table_does_not_exist(fresh_db): with pytest.raises(NoTable): fresh_db["not_a_table"].duplicate("duplicated") sqlite-utils-3.35.2/tests/test_enable_counts.py000066400000000000000000000144361452131415600216510ustar00rootroot00000000000000from sqlite_utils import Database from sqlite_utils import cli from click.testing import CliRunner import pytest def test_enable_counts_specific_table(fresh_db): foo = fresh_db["foo"] assert fresh_db.table_names() == [] for i in range(10): foo.insert({"name": "item {}".format(i)}) assert fresh_db.table_names() == ["foo"] assert foo.count == 10 # Now enable counts foo.enable_counts() assert foo.triggers_dict == { "foo_counts_insert": ( "CREATE TRIGGER [foo_counts_insert] AFTER INSERT ON [foo]\n" "BEGIN\n" " INSERT OR REPLACE INTO [_counts]\n" " VALUES (\n 'foo',\n" " COALESCE(\n" " (SELECT count FROM [_counts] WHERE [table] = 'foo'),\n" " 0\n" " ) + 1\n" " );\n" "END" ), "foo_counts_delete": ( "CREATE TRIGGER [foo_counts_delete] AFTER DELETE ON [foo]\n" "BEGIN\n" " INSERT OR REPLACE INTO [_counts]\n" " VALUES (\n" " 'foo',\n" " COALESCE(\n" " (SELECT count FROM [_counts] WHERE [table] = 'foo'),\n" " 0\n" " ) - 1\n" " );\n" "END" ), } assert fresh_db.table_names() == ["foo", "_counts"] assert list(fresh_db["_counts"].rows) == [{"count": 10, "table": "foo"}] # Add some items to test the triggers for i in range(5): foo.insert({"name": "item {}".format(10 + i)}) assert foo.count == 15 assert 
list(fresh_db["_counts"].rows) == [{"count": 15, "table": "foo"}] # Delete some items foo.delete_where("rowid < 7") assert foo.count == 9 assert list(fresh_db["_counts"].rows) == [{"count": 9, "table": "foo"}] foo.delete_where() assert foo.count == 0 assert list(fresh_db["_counts"].rows) == [{"count": 0, "table": "foo"}] def test_enable_counts_all_tables(fresh_db): foo = fresh_db["foo"] bar = fresh_db["bar"] foo.insert({"name": "Cleo"}) bar.insert({"name": "Cleo"}) foo.enable_fts(["name"]) fresh_db.enable_counts() assert set(fresh_db.table_names()) == { "foo", "bar", "foo_fts", "foo_fts_data", "foo_fts_idx", "foo_fts_docsize", "foo_fts_config", "_counts", } assert list(fresh_db["_counts"].rows) == [ {"count": 1, "table": "foo"}, {"count": 1, "table": "bar"}, {"count": 3, "table": "foo_fts_data"}, {"count": 1, "table": "foo_fts_idx"}, {"count": 1, "table": "foo_fts_docsize"}, {"count": 1, "table": "foo_fts_config"}, ] @pytest.fixture def counts_db_path(tmpdir): path = str(tmpdir / "test.db") db = Database(path) db["foo"].insert({"name": "bar"}) db["bar"].insert({"name": "bar"}) db["bar"].insert({"name": "bar"}) db["baz"].insert({"name": "bar"}) return path @pytest.mark.parametrize( "extra_args,expected_triggers", [ ( [], [ "foo_counts_insert", "foo_counts_delete", "bar_counts_insert", "bar_counts_delete", "baz_counts_insert", "baz_counts_delete", ], ), ( ["bar"], [ "bar_counts_insert", "bar_counts_delete", ], ), ], ) def test_cli_enable_counts(counts_db_path, extra_args, expected_triggers): db = Database(counts_db_path) assert list(db.triggers_dict.keys()) == [] result = CliRunner().invoke(cli.cli, ["enable-counts", counts_db_path] + extra_args) assert result.exit_code == 0 assert list(db.triggers_dict.keys()) == expected_triggers def test_uses_counts_after_enable_counts(counts_db_path): db = Database(counts_db_path) logged = [] with db.tracer(lambda sql, parameters: logged.append((sql, parameters))): assert db["foo"].count == 1 assert logged == [ ("select name from sqlite_master where type = 'view'", None), ("select count(*) from [foo]", []), ] logged.clear() assert not db.use_counts_table db.enable_counts() assert db.use_counts_table assert db["foo"].count == 1 assert logged == [ ( "CREATE TABLE IF NOT EXISTS [_counts](\n [table] TEXT PRIMARY KEY,\n count INTEGER DEFAULT 0\n);", None, ), ("select name from sqlite_master where type = 'table'", None), ("select name from sqlite_master where type = 'view'", None), ("select name from sqlite_master where type = 'view'", None), ("select name from sqlite_master where type = 'view'", None), ("select name from sqlite_master where type = 'view'", None), ("select sql from sqlite_master where name = ?", ("foo",)), ("SELECT quote(:value)", {"value": "foo"}), ("select sql from sqlite_master where name = ?", ("bar",)), ("SELECT quote(:value)", {"value": "bar"}), ("select sql from sqlite_master where name = ?", ("baz",)), ("SELECT quote(:value)", {"value": "baz"}), ("select sql from sqlite_master where name = ?", ("_counts",)), ("select name from sqlite_master where type = 'view'", None), ("select [table], count from _counts where [table] in (?)", ["foo"]), ] def test_reset_counts(counts_db_path): db = Database(counts_db_path) db["foo"].enable_counts() db["bar"].enable_counts() assert db.cached_counts() == {"foo": 1, "bar": 2} # Corrupt the value db["_counts"].update("foo", {"count": 3}) assert db.cached_counts() == {"foo": 3, "bar": 2} assert db["foo"].count == 3 # Reset them db.reset_counts() assert db.cached_counts() == {"foo": 1, "bar": 2} assert 
db["foo"].count == 1 def test_reset_counts_cli(counts_db_path): db = Database(counts_db_path) db["foo"].enable_counts() db["bar"].enable_counts() assert db.cached_counts() == {"foo": 1, "bar": 2} db["_counts"].update("foo", {"count": 3}) result = CliRunner().invoke(cli.cli, ["reset-counts", counts_db_path]) assert result.exit_code == 0 assert db.cached_counts() == {"foo": 1, "bar": 2} sqlite-utils-3.35.2/tests/test_extract.py000066400000000000000000000154771452131415600205100ustar00rootroot00000000000000from sqlite_utils.db import InvalidColumns import itertools import pytest @pytest.mark.parametrize("table", [None, "Species"]) @pytest.mark.parametrize("fk_column", [None, "species"]) def test_extract_single_column(fresh_db, table, fk_column): expected_table = table or "species" expected_fk = fk_column or "{}_id".format(expected_table) iter_species = itertools.cycle(["Palm", "Spruce", "Mangrove", "Oak"]) fresh_db["tree"].insert_all( ( { "id": i, "name": "Tree {}".format(i), "species": next(iter_species), "end": 1, } for i in range(1, 1001) ), pk="id", ) fresh_db["tree"].extract("species", table=table, fk_column=fk_column) assert fresh_db["tree"].schema == ( 'CREATE TABLE "tree" (\n' " [id] INTEGER PRIMARY KEY,\n" " [name] TEXT,\n" " [{}] INTEGER REFERENCES [{}]([id]),\n".format(expected_fk, expected_table) + " [end] INTEGER\n" + ")" ) assert fresh_db[expected_table].schema == ( "CREATE TABLE [{}] (\n".format(expected_table) + " [id] INTEGER PRIMARY KEY,\n" " [species] TEXT\n" ")" ) assert list(fresh_db[expected_table].rows) == [ {"id": 1, "species": "Palm"}, {"id": 2, "species": "Spruce"}, {"id": 3, "species": "Mangrove"}, {"id": 4, "species": "Oak"}, ] assert list(itertools.islice(fresh_db["tree"].rows, 0, 4)) == [ {"id": 1, "name": "Tree 1", expected_fk: 1, "end": 1}, {"id": 2, "name": "Tree 2", expected_fk: 2, "end": 1}, {"id": 3, "name": "Tree 3", expected_fk: 3, "end": 1}, {"id": 4, "name": "Tree 4", expected_fk: 4, "end": 1}, ] def test_extract_multiple_columns_with_rename(fresh_db): iter_common = itertools.cycle(["Palm", "Spruce", "Mangrove", "Oak"]) iter_latin = itertools.cycle(["Arecaceae", "Picea", "Rhizophora", "Quercus"]) fresh_db["tree"].insert_all( ( { "id": i, "name": "Tree {}".format(i), "common_name": next(iter_common), "latin_name": next(iter_latin), } for i in range(1, 1001) ), pk="id", ) fresh_db["tree"].extract( ["common_name", "latin_name"], rename={"common_name": "name"} ) assert fresh_db["tree"].schema == ( 'CREATE TABLE "tree" (\n' " [id] INTEGER PRIMARY KEY,\n" " [name] TEXT,\n" " [common_name_latin_name_id] INTEGER REFERENCES [common_name_latin_name]([id])\n" ")" ) assert fresh_db["common_name_latin_name"].schema == ( "CREATE TABLE [common_name_latin_name] (\n" " [id] INTEGER PRIMARY KEY,\n" " [name] TEXT,\n" " [latin_name] TEXT\n" ")" ) assert list(fresh_db["common_name_latin_name"].rows) == [ {"name": "Palm", "id": 1, "latin_name": "Arecaceae"}, {"name": "Spruce", "id": 2, "latin_name": "Picea"}, {"name": "Mangrove", "id": 3, "latin_name": "Rhizophora"}, {"name": "Oak", "id": 4, "latin_name": "Quercus"}, ] assert list(itertools.islice(fresh_db["tree"].rows, 0, 4)) == [ {"id": 1, "name": "Tree 1", "common_name_latin_name_id": 1}, {"id": 2, "name": "Tree 2", "common_name_latin_name_id": 2}, {"id": 3, "name": "Tree 3", "common_name_latin_name_id": 3}, {"id": 4, "name": "Tree 4", "common_name_latin_name_id": 4}, ] def test_extract_invalid_columns(fresh_db): fresh_db["tree"].insert( { "id": 1, "name": "Tree 1", "common_name": "Palm", "latin_name": "Arecaceae", }, 
pk="id", ) with pytest.raises(InvalidColumns): fresh_db["tree"].extract(["bad_column"]) def test_extract_rowid_table(fresh_db): fresh_db["tree"].insert( { "name": "Tree 1", "common_name": "Palm", "latin_name": "Arecaceae", } ) fresh_db["tree"].extract(["common_name", "latin_name"]) assert fresh_db["tree"].schema == ( 'CREATE TABLE "tree" (\n' " [name] TEXT,\n" " [common_name_latin_name_id] INTEGER REFERENCES [common_name_latin_name]([id])\n" ")" ) assert ( fresh_db.execute( """ select tree.name, common_name_latin_name.common_name, common_name_latin_name.latin_name from tree join common_name_latin_name on tree.common_name_latin_name_id = common_name_latin_name.id """ ).fetchall() == [("Tree 1", "Palm", "Arecaceae")] ) def test_reuse_lookup_table(fresh_db): fresh_db["species"].insert({"id": 1, "name": "Wolf"}, pk="id") fresh_db["sightings"].insert({"id": 10, "species": "Wolf"}, pk="id") fresh_db["individuals"].insert( {"id": 10, "name": "Terriana", "species": "Fox"}, pk="id" ) fresh_db["sightings"].extract("species", rename={"species": "name"}) fresh_db["individuals"].extract("species", rename={"species": "name"}) assert fresh_db["sightings"].schema == ( 'CREATE TABLE "sightings" (\n' " [id] INTEGER PRIMARY KEY,\n" " [species_id] INTEGER REFERENCES [species]([id])\n" ")" ) assert fresh_db["individuals"].schema == ( 'CREATE TABLE "individuals" (\n' " [id] INTEGER PRIMARY KEY,\n" " [name] TEXT,\n" " [species_id] INTEGER REFERENCES [species]([id])\n" ")" ) assert list(fresh_db["species"].rows) == [ {"id": 1, "name": "Wolf"}, {"id": 2, "name": "Fox"}, ] def test_extract_error_on_incompatible_existing_lookup_table(fresh_db): fresh_db["species"].insert({"id": 1}) fresh_db["tree"].insert({"name": "Tree 1", "common_name": "Palm"}) with pytest.raises(InvalidColumns): fresh_db["tree"].extract("common_name", table="species") # Try again with incompatible existing column type fresh_db["species2"].insert({"id": 1, "common_name": 3.5}) with pytest.raises(InvalidColumns): fresh_db["tree"].extract("common_name", table="species2") def test_extract_works_with_null_values(fresh_db): fresh_db["listens"].insert_all( [ {"id": 1, "track_title": "foo", "album_title": "bar"}, {"id": 2, "track_title": "baz", "album_title": None}, ], pk="id", ) fresh_db["listens"].extract( columns=["album_title"], table="albums", fk_column="album_id" ) assert list(fresh_db["listens"].rows) == [ {"id": 1, "track_title": "foo", "album_id": 1}, {"id": 2, "track_title": "baz", "album_id": 2}, ] assert list(fresh_db["albums"].rows) == [ {"id": 1, "album_title": "bar"}, {"id": 2, "album_title": None}, ] sqlite-utils-3.35.2/tests/test_extracts.py000066400000000000000000000041431452131415600206570ustar00rootroot00000000000000from sqlite_utils.db import Index import pytest @pytest.mark.parametrize( "kwargs,expected_table", [ (dict(extracts={"species_id": "Species"}), "Species"), (dict(extracts=["species_id"]), "species_id"), (dict(extracts=("species_id",)), "species_id"), ], ) @pytest.mark.parametrize("use_table_factory", [True, False]) def test_extracts(fresh_db, kwargs, expected_table, use_table_factory): table_kwargs = {} insert_kwargs = {} if use_table_factory: table_kwargs = kwargs else: insert_kwargs = kwargs trees = fresh_db.table("Trees", **table_kwargs) trees.insert_all( [ {"id": 1, "species_id": "Oak"}, {"id": 2, "species_id": "Oak"}, {"id": 3, "species_id": "Palm"}, ], **insert_kwargs ) # Should now have two tables: Trees and Species assert {expected_table, "Trees"} == set(fresh_db.table_names()) assert ( "CREATE TABLE [{}] (\n [id] 
INTEGER PRIMARY KEY,\n [value] TEXT\n)".format( expected_table ) == fresh_db[expected_table].schema ) assert ( "CREATE TABLE [Trees] (\n [id] INTEGER,\n [species_id] INTEGER REFERENCES [{}]([id])\n)".format( expected_table ) == fresh_db["Trees"].schema ) # Should have a foreign key reference assert len(fresh_db["Trees"].foreign_keys) == 1 fk = fresh_db["Trees"].foreign_keys[0] assert fk.table == "Trees" assert fk.column == "species_id" # Should have unique index on Species assert [ Index( seq=0, name="idx_{}_value".format(expected_table), unique=1, origin="c", partial=0, columns=["value"], ) ] == fresh_db[expected_table].indexes # Finally, check the rows assert [{"id": 1, "value": "Oak"}, {"id": 2, "value": "Palm"}] == list( fresh_db[expected_table].rows ) assert [ {"id": 1, "species_id": 1}, {"id": 2, "species_id": 1}, {"id": 3, "species_id": 2}, ] == list(fresh_db["Trees"].rows) sqlite-utils-3.35.2/tests/test_fts.py000066400000000000000000000507141452131415600176230ustar00rootroot00000000000000import pytest from sqlite_utils import Database from sqlite_utils.utils import sqlite3 search_records = [ { "text": "tanuki are running tricksters", "country": "Japan", "not_searchable": "foo", }, { "text": "racoons are biting trash pandas", "country": "USA", "not_searchable": "bar", }, ] def test_enable_fts(fresh_db): table = fresh_db["searchable"] table.insert_all(search_records) assert ["searchable"] == fresh_db.table_names() table.enable_fts(["text", "country"], fts_version="FTS4") assert [ "searchable", "searchable_fts", "searchable_fts_segments", "searchable_fts_segdir", "searchable_fts_docsize", "searchable_fts_stat", ] == fresh_db.table_names() assert [ { "rowid": 1, "text": "tanuki are running tricksters", "country": "Japan", "not_searchable": "foo", } ] == list(table.search("tanuki")) assert [ { "rowid": 2, "text": "racoons are biting trash pandas", "country": "USA", "not_searchable": "bar", } ] == list(table.search("usa")) assert [] == list(table.search("bar")) def test_enable_fts_escape_table_names(fresh_db): # Table names with restricted chars are handled correctly. # colons and dots are restricted characters for table names. 
table = fresh_db["http://example.com"] table.insert_all(search_records) assert ["http://example.com"] == fresh_db.table_names() table.enable_fts(["text", "country"], fts_version="FTS4") assert [ "http://example.com", "http://example.com_fts", "http://example.com_fts_segments", "http://example.com_fts_segdir", "http://example.com_fts_docsize", "http://example.com_fts_stat", ] == fresh_db.table_names() assert [ { "rowid": 1, "text": "tanuki are running tricksters", "country": "Japan", "not_searchable": "foo", } ] == list(table.search("tanuki")) assert [ { "rowid": 2, "text": "racoons are biting trash pandas", "country": "USA", "not_searchable": "bar", } ] == list(table.search("usa")) assert [] == list(table.search("bar")) def test_search_limit_offset(fresh_db): table = fresh_db["t"] table.insert_all(search_records) table.enable_fts(["text", "country"], fts_version="FTS4") assert len(list(table.search("are"))) == 2 assert len(list(table.search("are", limit=1))) == 1 assert list(table.search("are", limit=1, order_by="rowid"))[0]["rowid"] == 1 assert ( list(table.search("are", limit=1, offset=1, order_by="rowid"))[0]["rowid"] == 2 ) @pytest.mark.parametrize("fts_version", ("FTS4", "FTS5")) def test_search_where(fresh_db, fts_version): table = fresh_db["t"] table.insert_all(search_records) table.enable_fts(["text", "country"], fts_version=fts_version) results = list( table.search("are", where="country = :country", where_args={"country": "Japan"}) ) assert results == [ { "rowid": 1, "text": "tanuki are running tricksters", "country": "Japan", "not_searchable": "foo", } ] def test_search_where_args_disallows_query(fresh_db): table = fresh_db["t"] with pytest.raises(ValueError) as ex: list( table.search( "x", where="author = :query", where_args={"query": "not allowed"} ) ) assert ( ex.value.args[0] == "'query' is a reserved key and cannot be passed to where_args for .search()" ) def test_enable_fts_table_names_containing_spaces(fresh_db): table = fresh_db["test"] table.insert({"column with spaces": "in its name"}) table.enable_fts(["column with spaces"]) assert [ "test", "test_fts", "test_fts_data", "test_fts_idx", "test_fts_docsize", "test_fts_config", ] == fresh_db.table_names() def test_populate_fts(fresh_db): table = fresh_db["populatable"] table.insert(search_records[0]) table.enable_fts(["text", "country"], fts_version="FTS4") assert [] == list(table.search("trash pandas")) table.insert(search_records[1]) assert [] == list(table.search("trash pandas")) # Now run populate_fts to make this record available table.populate_fts(["text", "country"]) rows = list(table.search("usa")) assert [ { "rowid": 2, "text": "racoons are biting trash pandas", "country": "USA", "not_searchable": "bar", } ] == rows def test_populate_fts_escape_table_names(fresh_db): # Restricted characters such as colon and dots should be escaped. 
table = fresh_db["http://example.com"] table.insert(search_records[0]) table.enable_fts(["text", "country"], fts_version="FTS4") assert [] == list(table.search("trash pandas")) table.insert(search_records[1]) assert [] == list(table.search("trash pandas")) # Now run populate_fts to make this record available table.populate_fts(["text", "country"]) assert [ { "rowid": 2, "text": "racoons are biting trash pandas", "country": "USA", "not_searchable": "bar", } ] == list(table.search("usa")) @pytest.mark.parametrize("fts_version", ("4", "5")) def test_fts_tokenize(fresh_db, fts_version): table_name = "searchable_{}".format(fts_version) table = fresh_db[table_name] table.insert_all(search_records) # Test without porter stemming table.enable_fts( ["text", "country"], fts_version="FTS{}".format(fts_version), ) assert [] == list(table.search("bite")) # Test WITH stemming table.disable_fts() table.enable_fts( ["text", "country"], fts_version="FTS{}".format(fts_version), tokenize="porter", ) rows = list(table.search("bite", order_by="rowid")) assert len(rows) == 1 assert { "rowid": 2, "text": "racoons are biting trash pandas", "country": "USA", "not_searchable": "bar", }.items() <= rows[0].items() def test_optimize_fts(fresh_db): for fts_version in ("4", "5"): table_name = "searchable_{}".format(fts_version) table = fresh_db[table_name] table.insert_all(search_records) table.enable_fts(["text", "country"], fts_version="FTS{}".format(fts_version)) # You can call optimize successfully against the tables OR their _fts equivalents: for table_name in ( "searchable_4", "searchable_5", "searchable_4_fts", "searchable_5_fts", ): fresh_db[table_name].optimize() def test_enable_fts_with_triggers(fresh_db): table = fresh_db["searchable"] table.insert(search_records[0]) table.enable_fts(["text", "country"], fts_version="FTS4", create_triggers=True) rows1 = list(table.search("tanuki")) assert len(rows1) == 1 assert rows1 == [ { "rowid": 1, "text": "tanuki are running tricksters", "country": "Japan", "not_searchable": "foo", } ] table.insert(search_records[1]) # Triggers will auto-populate FTS virtual table, not need to call populate_fts() rows2 = list(table.search("usa")) assert rows2 == [ { "rowid": 2, "text": "racoons are biting trash pandas", "country": "USA", "not_searchable": "bar", } ] assert [] == list(table.search("bar")) @pytest.mark.parametrize("create_triggers", [True, False]) def test_disable_fts(fresh_db, create_triggers): table = fresh_db["searchable"] table.insert(search_records[0]) table.enable_fts(["text", "country"], create_triggers=create_triggers) assert { "searchable", "searchable_fts", "searchable_fts_data", "searchable_fts_idx", "searchable_fts_docsize", "searchable_fts_config", } == set(fresh_db.table_names()) if create_triggers: expected_triggers = {"searchable_ai", "searchable_ad", "searchable_au"} else: expected_triggers = set() assert expected_triggers == set( r[0] for r in fresh_db.execute( "select name from sqlite_master where type = 'trigger'" ).fetchall() ) # Now run .disable_fts() and confirm it worked table.disable_fts() assert ( 0 == fresh_db.execute( "select count(*) from sqlite_master where type = 'trigger'" ).fetchone()[0] ) assert ["searchable"] == fresh_db.table_names() def test_rebuild_fts(fresh_db): table = fresh_db["searchable"] table.insert(search_records[0]) table.enable_fts(["text", "country"]) # Run a search rows = list(table.search("are")) assert len(rows) == 1 assert { "rowid": 1, "text": "tanuki are running tricksters", "country": "Japan", "not_searchable": "foo", 
}.items() <= rows[0].items() # Insert another record table.insert(search_records[1]) # This should NOT show up in searches assert len(list(table.search("are"))) == 1 # Running rebuild_fts() should fix it table.rebuild_fts() rows2 = list(table.search("are")) assert len(rows2) == 2 @pytest.mark.parametrize("invalid_table", ["does_not_exist", "not_searchable"]) def test_rebuild_fts_invalid(fresh_db, invalid_table): fresh_db["not_searchable"].insert({"foo": "bar"}) # Raise OperationalError on invalid table with pytest.raises(sqlite3.OperationalError): fresh_db[invalid_table].rebuild_fts() @pytest.mark.parametrize("fts_version", ["FTS4", "FTS5"]) def test_rebuild_removes_junk_docsize_rows(tmpdir, fts_version): # Recreating https://github.com/simonw/sqlite-utils/issues/149 path = tmpdir / "test.db" db = Database(str(path), recursive_triggers=False) licenses = [{"key": "apache2", "name": "Apache 2"}, {"key": "bsd", "name": "BSD"}] db["licenses"].insert_all(licenses, pk="key", replace=True) db["licenses"].enable_fts(["name"], create_triggers=True, fts_version=fts_version) assert db["licenses_fts_docsize"].count == 2 # Bug: insert with replace increases the number of rows in _docsize: db["licenses"].insert_all(licenses, pk="key", replace=True) assert db["licenses_fts_docsize"].count == 4 # rebuild should fix this: db["licenses_fts"].rebuild_fts() assert db["licenses_fts_docsize"].count == 2 @pytest.mark.parametrize( "kwargs", [ {"columns": ["title"]}, {"fts_version": "FTS4"}, {"create_triggers": True}, {"tokenize": "porter"}, ], ) def test_enable_fts_replace(kwargs): db = Database(memory=True) db["books"].insert( { "id": 1, "title": "Habits of Australian Marsupials", "author": "Marlee Hawkins", }, pk="id", ) db["books"].enable_fts(["title", "author"]) assert not db["books"].triggers assert db["books_fts"].columns_dict.keys() == {"title", "author"} assert "FTS5" in db["books_fts"].schema assert "porter" not in db["books_fts"].schema # Now modify the FTS configuration should_have_changed_columns = "columns" in kwargs if "columns" not in kwargs: kwargs["columns"] = ["title", "author"] db["books"].enable_fts(**kwargs, replace=True) # Check that the new configuration is correct if should_have_changed_columns: assert db["books_fts"].columns_dict.keys() == set(["title"]) if "create_triggers" in kwargs: assert db["books"].triggers if "fts_version" in kwargs: assert "FTS4" in db["books_fts"].schema if "tokenize" in kwargs: assert "porter" in db["books_fts"].schema def test_enable_fts_replace_does_nothing_if_args_the_same(): queries = [] db = Database(memory=True, tracer=lambda sql, params: queries.append((sql, params))) db["books"].insert( { "id": 1, "title": "Habits of Australian Marsupials", "author": "Marlee Hawkins", }, pk="id", ) db["books"].enable_fts(["title", "author"], create_triggers=True) queries.clear() # Running that again shouldn't run much SQL: db["books"].enable_fts(["title", "author"], create_triggers=True, replace=True) # The only SQL that executed should be select statements assert all(q[0].startswith("select ") for q in queries) def test_enable_fts_error_message_on_views(): db = Database(memory=True) db.create_view("hello", "select 1 + 1") with pytest.raises(NotImplementedError) as e: db["hello"].enable_fts() assert e.value.args[0] == "enable_fts() is supported on tables but not on views" @pytest.mark.parametrize( "kwargs,fts,expected", [ ( {}, "FTS5", ( "with original as (\n" " select\n" " rowid,\n" " *\n" " from [books]\n" ")\n" "select\n" " [original].*\n" "from\n" " [original]\n" " 
join [books_fts] on [original].rowid = [books_fts].rowid\n" "where\n" " [books_fts] match :query\n" "order by\n" " [books_fts].rank" ), ), ( {"columns": ["title"], "order_by": "rowid", "limit": 10}, "FTS5", ( "with original as (\n" " select\n" " rowid,\n" " [title]\n" " from [books]\n" ")\n" "select\n" " [original].[title]\n" "from\n" " [original]\n" " join [books_fts] on [original].rowid = [books_fts].rowid\n" "where\n" " [books_fts] match :query\n" "order by\n" " rowid\n" "limit 10" ), ), ( {"where": "author = :author"}, "FTS5", ( "with original as (\n" " select\n" " rowid,\n" " *\n" " from [books]\n" " where author = :author\n" ")\n" "select\n" " [original].*\n" "from\n" " [original]\n" " join [books_fts] on [original].rowid = [books_fts].rowid\n" "where\n" " [books_fts] match :query\n" "order by\n" " [books_fts].rank" ), ), ( {"columns": ["title"]}, "FTS4", ( "with original as (\n" " select\n" " rowid,\n" " [title]\n" " from [books]\n" ")\n" "select\n" " [original].[title]\n" "from\n" " [original]\n" " join [books_fts] on [original].rowid = [books_fts].rowid\n" "where\n" " [books_fts] match :query\n" "order by\n" " rank_bm25(matchinfo([books_fts], 'pcnalx'))" ), ), ( {"offset": 1, "limit": 1}, "FTS4", ( "with original as (\n" " select\n" " rowid,\n" " *\n" " from [books]\n" ")\n" "select\n" " [original].*\n" "from\n" " [original]\n" " join [books_fts] on [original].rowid = [books_fts].rowid\n" "where\n" " [books_fts] match :query\n" "order by\n" " rank_bm25(matchinfo([books_fts], 'pcnalx'))\n" "limit 1 offset 1" ), ), ( {"limit": 2}, "FTS4", ( "with original as (\n" " select\n" " rowid,\n" " *\n" " from [books]\n" ")\n" "select\n" " [original].*\n" "from\n" " [original]\n" " join [books_fts] on [original].rowid = [books_fts].rowid\n" "where\n" " [books_fts] match :query\n" "order by\n" " rank_bm25(matchinfo([books_fts], 'pcnalx'))\n" "limit 2" ), ), ( {"where": "author = :author"}, "FTS4", ( "with original as (\n" " select\n" " rowid,\n" " *\n" " from [books]\n" " where author = :author\n" ")\n" "select\n" " [original].*\n" "from\n" " [original]\n" " join [books_fts] on [original].rowid = [books_fts].rowid\n" "where\n" " [books_fts] match :query\n" "order by\n" " rank_bm25(matchinfo([books_fts], 'pcnalx'))" ), ), ( {"include_rank": True}, "FTS5", ( "with original as (\n" " select\n" " rowid,\n" " *\n" " from [books]\n" ")\n" "select\n" " [original].*,\n" " [books_fts].rank rank\n" "from\n" " [original]\n" " join [books_fts] on [original].rowid = [books_fts].rowid\n" "where\n" " [books_fts] match :query\n" "order by\n" " [books_fts].rank" ), ), ( {"include_rank": True}, "FTS4", ( "with original as (\n" " select\n" " rowid,\n" " *\n" " from [books]\n" ")\n" "select\n" " [original].*,\n" " rank_bm25(matchinfo([books_fts], 'pcnalx')) rank\n" "from\n" " [original]\n" " join [books_fts] on [original].rowid = [books_fts].rowid\n" "where\n" " [books_fts] match :query\n" "order by\n" " rank_bm25(matchinfo([books_fts], 'pcnalx'))" ), ), ], ) def test_search_sql(kwargs, fts, expected): db = Database(memory=True) db["books"].insert( { "title": "Habits of Australian Marsupials", "author": "Marlee Hawkins", } ) db["books"].enable_fts(["title", "author"], fts_version=fts) sql = db["books"].search_sql(**kwargs) assert sql == expected @pytest.mark.parametrize( "input,expected", ( ("dog", '"dog"'), ("cat,", '"cat,"'), ("cat's", '"cat\'s"'), ("dog.", '"dog."'), ("cat dog", '"cat" "dog"'), # If a phrase is already double quoted, leave it so ('"cat dog"', '"cat dog"'), ('"cat dog" fish', '"cat dog" 
"fish"'), # Sensibly handle unbalanced double quotes ('cat"', '"cat"'), ('"cat dog" "fish', '"cat dog" "fish"'), ), ) def test_quote_fts_query(fresh_db, input, expected): table = fresh_db["searchable"] table.insert_all(search_records) table.enable_fts(["text", "country"]) quoted = fresh_db.quote_fts(input) assert quoted == expected # Executing query does not crash. list(table.search(quoted)) def test_search_quote(fresh_db): table = fresh_db["searchable"] table.insert_all(search_records) table.enable_fts(["text", "country"]) query = "cat's" with pytest.raises(sqlite3.OperationalError): list(table.search(query)) # No exception with quote=True list(table.search(query, quote=True)) sqlite-utils-3.35.2/tests/test_get.py000066400000000000000000000016421452131415600176020ustar00rootroot00000000000000import pytest from sqlite_utils.db import NotFoundError def test_get_rowid(fresh_db): dogs = fresh_db["dogs"] cleo = {"name": "Cleo", "age": 4} row_id = dogs.insert(cleo).last_rowid assert cleo == dogs.get(row_id) def test_get_primary_key(fresh_db): dogs = fresh_db["dogs"] cleo = {"name": "Cleo", "age": 4, "id": 5} last_pk = dogs.insert(cleo, pk="id").last_pk assert 5 == last_pk assert cleo == dogs.get(5) @pytest.mark.parametrize( "argument,expected_msg", [(100, None), (None, None), ((1, 2), "Need 1 primary key value"), ("2", None)], ) def test_get_not_found(argument, expected_msg, fresh_db): fresh_db["dogs"].insert( {"id": 1, "name": "Cleo", "age": 4, "is_good": True}, pk="id" ) with pytest.raises(NotFoundError) as excinfo: fresh_db["dogs"].get(argument) if expected_msg is not None: assert expected_msg == excinfo.value.args[0] sqlite-utils-3.35.2/tests/test_gis.py000066400000000000000000000147471452131415600176170ustar00rootroot00000000000000import json import pytest from click.testing import CliRunner from sqlite_utils.cli import cli from sqlite_utils.db import Database from sqlite_utils.utils import find_spatialite, sqlite3 try: import sqlean except ImportError: sqlean = None pytestmark = [ pytest.mark.skipif( not find_spatialite(), reason="Could not find SpatiaLite extension" ), pytest.mark.skipif( not hasattr(sqlite3.Connection, "enable_load_extension"), reason="sqlite3.Connection missing enable_load_extension", ), pytest.mark.skipif( sqlean is not None, reason="sqlean.py is not compatible with SpatiaLite" ), ] # python API tests def test_find_spatialite(): spatialite = find_spatialite() assert spatialite is None or isinstance(spatialite, str) def test_init_spatialite(): db = Database(memory=True) spatialite = find_spatialite() db.init_spatialite(spatialite) assert "spatial_ref_sys" in db.table_names() def test_add_geometry_column(): db = Database(memory=True) spatialite = find_spatialite() db.init_spatialite(spatialite) # create a table first table = db.create_table("locations", {"id": str, "properties": str}) table.add_geometry_column( column_name="geometry", geometry_type="Point", srid=4326, coord_dimension=2, ) assert db["geometry_columns"].get(["locations", "geometry"]) == { "f_table_name": "locations", "f_geometry_column": "geometry", "geometry_type": 1, # point "coord_dimension": 2, "srid": 4326, "spatial_index_enabled": 0, } def test_create_spatial_index(): db = Database(memory=True) spatialite = find_spatialite() assert db.init_spatialite(spatialite) # create a table, add a geometry column with default values table = db.create_table("locations", {"id": str, "properties": str}) assert table.add_geometry_column("geometry", "Point") # index it assert table.create_spatial_index("geometry") 
assert "idx_locations_geometry" in db.table_names() def test_double_create_spatial_index(): db = Database(memory=True) spatialite = find_spatialite() db.init_spatialite(spatialite) # create a table, add a geometry column with default values table = db.create_table("locations", {"id": str, "properties": str}) table.add_geometry_column("geometry", "Point") # index it, return True assert table.create_spatial_index("geometry") assert "idx_locations_geometry" in db.table_names() # call it again, return False assert not table.create_spatial_index("geometry") # cli tests @pytest.mark.parametrize("use_spatialite_shortcut", [True, False]) def test_query_load_extension(use_spatialite_shortcut): # Without --load-extension: result = CliRunner().invoke(cli, [":memory:", "select spatialite_version()"]) assert result.exit_code == 1 assert "no such function: spatialite_version" in result.output # With --load-extension: if use_spatialite_shortcut: load_extension = "spatialite" else: load_extension = find_spatialite() result = CliRunner().invoke( cli, [ ":memory:", "select spatialite_version()", "--load-extension={}".format(load_extension), ], ) assert result.exit_code == 0, result.stdout assert ["spatialite_version()"] == list(json.loads(result.output)[0].keys()) def test_cli_create_spatialite(tmpdir): # sqlite-utils create test.db --init-spatialite db_path = tmpdir / "created.db" result = CliRunner().invoke( cli, ["create-database", str(db_path), "--init-spatialite"] ) assert result.exit_code == 0 assert db_path.exists() assert db_path.read_binary()[:16] == b"SQLite format 3\x00" db = Database(str(db_path)) assert "spatial_ref_sys" in db.table_names() def test_cli_add_geometry_column(tmpdir): # create a rowid table with one column db_path = tmpdir / "spatial.db" db = Database(str(db_path)) db.init_spatialite() table = db["locations"].create({"name": str}) result = CliRunner().invoke( cli, [ "add-geometry-column", str(db_path), table.name, "geometry", "--type", "POINT", ], ) assert result.exit_code == 0 assert db["geometry_columns"].get(["locations", "geometry"]) == { "f_table_name": "locations", "f_geometry_column": "geometry", "geometry_type": 1, # point "coord_dimension": 2, "srid": 4326, "spatial_index_enabled": 0, } def test_cli_add_geometry_column_options(tmpdir): # create a rowid table with one column db_path = tmpdir / "spatial.db" db = Database(str(db_path)) db.init_spatialite() table = db["locations"].create({"name": str}) result = CliRunner().invoke( cli, [ "add-geometry-column", str(db_path), table.name, "geometry", "-t", "POLYGON", "--srid", "3857", # https://epsg.io/3857 "--not-null", ], ) assert result.exit_code == 0 assert db["geometry_columns"].get(["locations", "geometry"]) == { "f_table_name": "locations", "f_geometry_column": "geometry", "geometry_type": 3, # polygon "coord_dimension": 2, "srid": 3857, "spatial_index_enabled": 0, } column = table.columns[1] assert column.notnull def test_cli_add_geometry_column_invalid_type(tmpdir): # create a rowid table with one column db_path = tmpdir / "spatial.db" db = Database(str(db_path)) db.init_spatialite() table = db["locations"].create({"name": str}) result = CliRunner().invoke( cli, [ "add-geometry-column", str(db_path), table.name, "geometry", "--type", "NOT-A-TYPE", ], ) assert 2 == result.exit_code def test_cli_create_spatial_index(tmpdir): # create a rowid table with one column db_path = tmpdir / "spatial.db" db = Database(str(db_path)) db.init_spatialite() table = db["locations"].create({"name": str}) 
table.add_geometry_column("geometry", "POINT") result = CliRunner().invoke( cli, ["create-spatial-index", str(db_path), table.name, "geometry"] ) assert result.exit_code == 0 assert "idx_locations_geometry" in db.table_names() sqlite-utils-3.35.2/tests/test_hypothesis.py000066400000000000000000000020371452131415600212210ustar00rootroot00000000000000from hypothesis import given import hypothesis.strategies as st import sqlite_utils # SQLite integers are -(2^63) to 2^63 - 1 @given(st.integers(-9223372036854775808, 9223372036854775807)) def test_roundtrip_integers(integer): db = sqlite_utils.Database(memory=True) row = { "integer": integer, } db["test"].insert(row) assert list(db["test"].rows) == [row] @given(st.text()) def test_roundtrip_text(text): db = sqlite_utils.Database(memory=True) row = { "text": text, } db["test"].insert(row) assert list(db["test"].rows) == [row] @given(st.binary(max_size=1024 * 1024)) def test_roundtrip_binary(binary): db = sqlite_utils.Database(memory=True) row = { "binary": binary, } db["test"].insert(row) assert list(db["test"].rows) == [row] @given(st.floats(allow_nan=False)) def test_roundtrip_floats(floats): db = sqlite_utils.Database(memory=True) row = { "floats": floats, } db["test"].insert(row) assert list(db["test"].rows) == [row] sqlite-utils-3.35.2/tests/test_insert_files.py000066400000000000000000000127101452131415600215070ustar00rootroot00000000000000from sqlite_utils import cli, Database from click.testing import CliRunner import os import pathlib import pytest import sys @pytest.mark.parametrize("silent", (False, True)) def test_insert_files(silent): runner = CliRunner() with runner.isolated_filesystem(): tmpdir = pathlib.Path(".") db_path = str(tmpdir / "files.db") (tmpdir / "one.txt").write_text("This is file one", "utf-8") (tmpdir / "two.txt").write_text("Two is shorter", "utf-8") (tmpdir / "nested").mkdir() (tmpdir / "nested" / "three.zz.txt").write_text("Three is nested", "utf-8") coltypes = ( "name", "path", "fullpath", "sha256", "md5", "mode", "content", "content_text", "mtime", "ctime", "mtime_int", "ctime_int", "mtime_iso", "ctime_iso", "size", "suffix", "stem", ) cols = [] for coltype in coltypes: cols += ["-c", "{}:{}".format(coltype, coltype)] result = runner.invoke( cli.cli, ["insert-files", db_path, "files", str(tmpdir)] + cols + ["--pk", "path"] + (["--silent"] if silent else []), catch_exceptions=False, ) assert result.exit_code == 0, result.stdout db = Database(db_path) rows_by_path = {r["path"]: r for r in db["files"].rows} one, two, three = ( rows_by_path["one.txt"], rows_by_path["two.txt"], rows_by_path[os.path.join("nested", "three.zz.txt")], ) assert { "content": b"This is file one", "content_text": "This is file one", "md5": "556dfb57fce9ca301f914e2273adf354", "name": "one.txt", "path": "one.txt", "sha256": "e34138f26b5f7368f298b4e736fea0aad87ddec69fbd04dc183b20f4d844bad5", "size": 16, "stem": "one", "suffix": ".txt", }.items() <= one.items() assert { "content": b"Two is shorter", "content_text": "Two is shorter", "md5": "f86f067b083af1911043eb215e74ac70", "name": "two.txt", "path": "two.txt", "sha256": "9368988ed16d4a2da0af9db9b686d385b942cb3ffd4e013f43aed2ec041183d9", "size": 14, "stem": "two", "suffix": ".txt", }.items() <= two.items() assert { "content": b"Three is nested", "content_text": "Three is nested", "md5": "12580f341781f5a5b589164d3cd39523", "name": "three.zz.txt", "path": os.path.join("nested", "three.zz.txt"), "sha256": "6dd45aaaaa6b9f96af19363a92c8fca5d34791d3c35c44eb19468a6a862cc8cd", "size": 15, "stem": 
"three.zz", "suffix": ".txt", }.items() <= three.items() # Assert the other int/str/float columns exist and are of the right types expected_types = { "ctime": float, "ctime_int": int, "ctime_iso": str, "mtime": float, "mtime_int": int, "mtime_iso": str, "mode": int, "fullpath": str, "content": bytes, "content_text": str, "stem": str, "suffix": str, } for colname, expected_type in expected_types.items(): for row in (one, two, three): assert isinstance(row[colname], expected_type) @pytest.mark.parametrize( "use_text,encoding,input,expected", ( (False, None, "hello world", b"hello world"), (True, None, "hello world", "hello world"), (False, None, b"S\xe3o Paulo", b"S\xe3o Paulo"), (True, "latin-1", b"S\xe3o Paulo", "S\xe3o Paulo"), ), ) def test_insert_files_stdin(use_text, encoding, input, expected): runner = CliRunner() with runner.isolated_filesystem(): tmpdir = pathlib.Path(".") db_path = str(tmpdir / "files.db") args = ["insert-files", db_path, "files", "-", "--name", "stdin-name"] if use_text: args += ["--text"] if encoding is not None: args += ["--encoding", encoding] result = runner.invoke( cli.cli, args, catch_exceptions=False, input=input, ) assert result.exit_code == 0, result.stdout db = Database(db_path) row = list(db["files"].rows)[0] key = "content" if use_text: key = "content_text" assert {"path": "stdin-name", key: expected}.items() <= row.items() @pytest.mark.skipif( sys.platform.startswith("win"), reason="Windows has a different way of handling default encodings", ) def test_insert_files_bad_text_encoding_error(): runner = CliRunner() with runner.isolated_filesystem(): tmpdir = pathlib.Path(".") latin = tmpdir / "latin.txt" latin.write_bytes(b"S\xe3o Paulo") db_path = str(tmpdir / "files.db") result = runner.invoke( cli.cli, ["insert-files", db_path, "files", str(latin), "--text"], catch_exceptions=False, ) assert result.exit_code == 1, result.output assert result.output.strip().startswith( "Error: Could not read file '{}' as text".format(str(latin.resolve())) ) sqlite-utils-3.35.2/tests/test_introspect.py000066400000000000000000000236341452131415600212220ustar00rootroot00000000000000from sqlite_utils.db import Index, View, Database, XIndex, XIndexColumn import pytest def test_table_names(existing_db): assert ["foo"] == existing_db.table_names() def test_view_names(fresh_db): fresh_db.create_view("foo_view", "select 1") assert ["foo_view"] == fresh_db.view_names() def test_table_names_fts4(existing_db): existing_db["woo"].insert({"title": "Hello"}).enable_fts( ["title"], fts_version="FTS4" ) existing_db["woo2"].insert({"title": "Hello"}).enable_fts( ["title"], fts_version="FTS5" ) assert ["woo_fts"] == existing_db.table_names(fts4=True) assert ["woo2_fts"] == existing_db.table_names(fts5=True) def test_detect_fts(existing_db): existing_db["woo"].insert({"title": "Hello"}).enable_fts( ["title"], fts_version="FTS4" ) existing_db["woo2"].insert({"title": "Hello"}).enable_fts( ["title"], fts_version="FTS5" ) assert "woo_fts" == existing_db["woo"].detect_fts() assert "woo_fts" == existing_db["woo_fts"].detect_fts() assert "woo2_fts" == existing_db["woo2"].detect_fts() assert "woo2_fts" == existing_db["woo2_fts"].detect_fts() assert existing_db["foo"].detect_fts() is None @pytest.mark.parametrize("reverse_order", (True, False)) def test_detect_fts_similar_tables(fresh_db, reverse_order): # https://github.com/simonw/sqlite-utils/issues/434 table1, table2 = ("demo", "demo2") if reverse_order: table1, table2 = table2, table1 fresh_db[table1].insert({"title": "Hello"}).enable_fts( 
["title"], fts_version="FTS4" ) fresh_db[table2].insert({"title": "Hello"}).enable_fts( ["title"], fts_version="FTS4" ) assert fresh_db[table1].detect_fts() == "{}_fts".format(table1) assert fresh_db[table2].detect_fts() == "{}_fts".format(table2) def test_tables(existing_db): assert len(existing_db.tables) == 1 assert existing_db.tables[0].name == "foo" def test_views(fresh_db): fresh_db.create_view("foo_view", "select 1") assert len(fresh_db.views) == 1 view = fresh_db.views[0] assert isinstance(view, View) assert view.name == "foo_view" assert repr(view) == "" assert view.columns_dict == {"1": str} def test_count(existing_db): assert existing_db["foo"].count == 3 assert existing_db["foo"].count_where() == 3 assert existing_db["foo"].execute_count() == 3 def test_count_where(existing_db): assert existing_db["foo"].count_where("text != ?", ["two"]) == 2 assert existing_db["foo"].count_where("text != :t", {"t": "two"}) == 2 def test_columns(existing_db): table = existing_db["foo"] assert [{"name": "text", "type": "TEXT"}] == [ {"name": col.name, "type": col.type} for col in table.columns ] def test_table_schema(existing_db): assert existing_db["foo"].schema == "CREATE TABLE foo (text TEXT)" def test_database_schema(existing_db): assert existing_db.schema == "CREATE TABLE foo (text TEXT);" def test_table_repr(fresh_db): table = fresh_db["dogs"].insert({"name": "Cleo", "age": 4}) assert "
" == repr(table) assert "
" == repr(fresh_db["cats"]) def test_indexes(fresh_db): fresh_db.executescript( """ create table Gosh (c1 text, c2 text, c3 text); create index Gosh_c1 on Gosh(c1); create index Gosh_c2c3 on Gosh(c2, c3); """ ) assert [ Index( seq=0, name="Gosh_c2c3", unique=0, origin="c", partial=0, columns=["c2", "c3"], ), Index(seq=1, name="Gosh_c1", unique=0, origin="c", partial=0, columns=["c1"]), ] == fresh_db["Gosh"].indexes def test_xindexes(fresh_db): fresh_db.executescript( """ create table Gosh (c1 text, c2 text, c3 text); create index Gosh_c1 on Gosh(c1); create index Gosh_c2c3 on Gosh(c2, c3 desc); """ ) assert fresh_db["Gosh"].xindexes == [ XIndex( name="Gosh_c2c3", columns=[ XIndexColumn(seqno=0, cid=1, name="c2", desc=0, coll="BINARY", key=1), XIndexColumn(seqno=1, cid=2, name="c3", desc=1, coll="BINARY", key=1), XIndexColumn(seqno=2, cid=-1, name=None, desc=0, coll="BINARY", key=0), ], ), XIndex( name="Gosh_c1", columns=[ XIndexColumn(seqno=0, cid=0, name="c1", desc=0, coll="BINARY", key=1), XIndexColumn(seqno=1, cid=-1, name=None, desc=0, coll="BINARY", key=0), ], ), ] @pytest.mark.parametrize( "column,expected_table_guess", ( ("author", "authors"), ("author_id", "authors"), ("authors", "authors"), ("genre", "genre"), ("genre_id", "genre"), ), ) def test_guess_foreign_table(fresh_db, column, expected_table_guess): fresh_db.create_table("authors", {"name": str}) fresh_db.create_table("genre", {"name": str}) assert expected_table_guess == fresh_db["books"].guess_foreign_table(column) @pytest.mark.parametrize( "pk,expected", ((None, ["rowid"]), ("id", ["id"]), (["id", "id2"], ["id", "id2"])) ) def test_pks(fresh_db, pk, expected): fresh_db["foo"].insert_all([{"id": 1, "id2": 2}], pk=pk) assert expected == fresh_db["foo"].pks def test_triggers_and_triggers_dict(fresh_db): assert [] == fresh_db.triggers authors = fresh_db["authors"] authors.insert_all( [ {"name": "Frank Herbert", "famous_works": "Dune"}, {"name": "Neal Stephenson", "famous_works": "Cryptonomicon"}, ] ) fresh_db["other"].insert({"foo": "bar"}) assert authors.triggers == [] assert authors.triggers_dict == {} assert fresh_db["other"].triggers == [] assert fresh_db.triggers_dict == {} authors.enable_fts( ["name", "famous_works"], fts_version="FTS4", create_triggers=True ) expected_triggers = { ("authors_ai", "authors"), ("authors_ad", "authors"), ("authors_au", "authors"), } assert expected_triggers == {(t.name, t.table) for t in fresh_db.triggers} assert expected_triggers == { (t.name, t.table) for t in fresh_db["authors"].triggers } expected_triggers = { "authors_ai": ( "CREATE TRIGGER [authors_ai] AFTER INSERT ON [authors] BEGIN\n" " INSERT INTO [authors_fts] (rowid, [name], [famous_works]) VALUES (new.rowid, new.[name], new.[famous_works]);\n" "END" ), "authors_ad": ( "CREATE TRIGGER [authors_ad] AFTER DELETE ON [authors] BEGIN\n" " INSERT INTO [authors_fts] ([authors_fts], rowid, [name], [famous_works]) VALUES('delete', old.rowid, old.[name], old.[famous_works]);\n" "END" ), "authors_au": ( "CREATE TRIGGER [authors_au] AFTER UPDATE ON [authors] BEGIN\n" " INSERT INTO [authors_fts] ([authors_fts], rowid, [name], [famous_works]) VALUES('delete', old.rowid, old.[name], old.[famous_works]);\n" " INSERT INTO [authors_fts] (rowid, [name], [famous_works]) VALUES (new.rowid, new.[name], new.[famous_works]);\nEND" ), } assert authors.triggers_dict == expected_triggers assert fresh_db["other"].triggers == [] assert fresh_db["other"].triggers_dict == {} assert fresh_db.triggers_dict == expected_triggers def 
test_has_counts_triggers(fresh_db): authors = fresh_db["authors"] authors.insert({"name": "Frank Herbert"}) assert not authors.has_counts_triggers authors.enable_counts() assert authors.has_counts_triggers @pytest.mark.parametrize( "sql,expected_name,expected_using", [ ( """ CREATE VIRTUAL TABLE foo USING FTS5(name) """, "foo", "FTS5", ), ( """ CREATE VIRTUAL TABLE "foo" USING FTS4(name) """, "foo", "FTS4", ), ( """ CREATE VIRTUAL TABLE IF NOT EXISTS `foo` USING FTS4(name) """, "foo", "FTS4", ), ( """ CREATE VIRTUAL TABLE IF NOT EXISTS `foo` USING fts5(name) """, "foo", "FTS5", ), ( """ CREATE TABLE IF NOT EXISTS `foo` (id integer primary key) """, "foo", None, ), ], ) def test_virtual_table_using(fresh_db, sql, expected_name, expected_using): fresh_db.execute(sql) assert fresh_db[expected_name].virtual_table_using == expected_using def test_use_rowid(fresh_db): fresh_db["rowid_table"].insert({"name": "Cleo"}) fresh_db["regular_table"].insert({"id": 1, "name": "Cleo"}, pk="id") assert fresh_db["rowid_table"].use_rowid assert not fresh_db["regular_table"].use_rowid @pytest.mark.skipif( not Database(memory=True).supports_strict, reason="Needs SQLite version that supports strict", ) @pytest.mark.parametrize( "create_table,expected_strict", ( ("create table t (id integer) strict", True), ("create table t (id integer) STRICT", True), ("create table t (id integer primary key) StriCt, WITHOUT ROWID", True), ("create table t (id integer primary key) WITHOUT ROWID", False), ("create table t (id integer)", False), ), ) def test_table_strict(fresh_db, create_table, expected_strict): fresh_db.execute(create_table) table = fresh_db["t"] assert table.strict == expected_strict @pytest.mark.parametrize( "value", ( 1, 1.3, "foo", True, b"binary", ), ) def test_table_default_values(fresh_db, value): fresh_db["default_values"].insert( {"nodefault": 1, "value": value}, defaults={"value": value} ) default_values = fresh_db["default_values"].default_values assert default_values == {"value": value} sqlite-utils-3.35.2/tests/test_lookup.py000066400000000000000000000114741452131415600203400ustar00rootroot00000000000000from sqlite_utils.db import Index import pytest def test_lookup_new_table(fresh_db): species = fresh_db["species"] palm_id = species.lookup({"name": "Palm"}) oak_id = species.lookup({"name": "Oak"}) cherry_id = species.lookup({"name": "Cherry"}) assert palm_id == species.lookup({"name": "Palm"}) assert oak_id == species.lookup({"name": "Oak"}) assert cherry_id == species.lookup({"name": "Cherry"}) assert palm_id != oak_id != cherry_id # Ensure the correct indexes were created assert [ Index( seq=0, name="idx_species_name", unique=1, origin="c", partial=0, columns=["name"], ) ] == species.indexes def test_lookup_new_table_compound_key(fresh_db): species = fresh_db["species"] palm_id = species.lookup({"name": "Palm", "type": "Tree"}) oak_id = species.lookup({"name": "Oak", "type": "Tree"}) assert palm_id == species.lookup({"name": "Palm", "type": "Tree"}) assert oak_id == species.lookup({"name": "Oak", "type": "Tree"}) assert [ Index( seq=0, name="idx_species_name_type", unique=1, origin="c", partial=0, columns=["name", "type"], ) ] == species.indexes def test_lookup_adds_unique_constraint_to_existing_table(fresh_db): species = fresh_db.table("species", pk="id") palm_id = species.insert({"name": "Palm"}).last_pk species.insert({"name": "Oak"}) assert [] == species.indexes assert palm_id == species.lookup({"name": "Palm"}) assert [ Index( seq=0, name="idx_species_name", unique=1, origin="c", partial=0, 
columns=["name"], ) ] == species.indexes def test_lookup_fails_if_constraint_cannot_be_added(fresh_db): species = fresh_db.table("species", pk="id") species.insert_all([{"id": 1, "name": "Palm"}, {"id": 2, "name": "Palm"}]) # This will fail because the name column is not unique with pytest.raises(Exception, match="UNIQUE constraint failed"): species.lookup({"name": "Palm"}) def test_lookup_with_extra_values(fresh_db): species = fresh_db["species"] id = species.lookup({"name": "Palm", "type": "Tree"}, {"first_seen": "2020-01-01"}) assert species.get(id) == { "id": 1, "name": "Palm", "type": "Tree", "first_seen": "2020-01-01", } # A subsequent lookup() should ignore the second dictionary id2 = species.lookup({"name": "Palm", "type": "Tree"}, {"first_seen": "2021-02-02"}) assert id2 == id assert species.get(id2) == { "id": 1, "name": "Palm", "type": "Tree", "first_seen": "2020-01-01", } def test_lookup_with_extra_insert_parameters(fresh_db): other_table = fresh_db["other_table"] other_table.insert({"id": 1, "name": "Name"}, pk="id") species = fresh_db["species"] id = species.lookup( {"name": "Palm", "type": "Tree"}, { "first_seen": "2020-01-01", "make_not_null": 1, "fk_to_other": 1, "default_is_dog": "cat", "extract_this": "This is extracted", "convert_to_upper": "upper", "make_this_integer": "2", "this_at_front": 1, }, pk="renamed_id", foreign_keys=(("fk_to_other", "other_table", "id"),), column_order=("this_at_front",), not_null={"make_not_null"}, defaults={"default_is_dog": "dog"}, extracts=["extract_this"], conversions={"convert_to_upper": "upper(?)"}, columns={"make_this_integer": int}, ) assert species.schema == ( "CREATE TABLE [species] (\n" " [renamed_id] INTEGER PRIMARY KEY,\n" " [this_at_front] INTEGER,\n" " [name] TEXT,\n" " [type] TEXT,\n" " [first_seen] TEXT,\n" " [make_not_null] INTEGER NOT NULL,\n" " [fk_to_other] INTEGER REFERENCES [other_table]([id]),\n" " [default_is_dog] TEXT DEFAULT 'dog',\n" " [extract_this] INTEGER REFERENCES [extract_this]([id]),\n" " [convert_to_upper] TEXT,\n" " [make_this_integer] INTEGER\n" ")" ) assert species.get(id) == { "renamed_id": id, "this_at_front": 1, "name": "Palm", "type": "Tree", "first_seen": "2020-01-01", "make_not_null": 1, "fk_to_other": 1, "default_is_dog": "cat", "extract_this": 1, "convert_to_upper": "UPPER", "make_this_integer": 2, } assert species.indexes == [ Index( seq=0, name="idx_species_name_type", unique=1, origin="c", partial=0, columns=["name", "type"], ) ] sqlite-utils-3.35.2/tests/test_m2m.py000066400000000000000000000166651452131415600175310ustar00rootroot00000000000000from sqlite_utils.db import ForeignKey, NoObviousTable import pytest def test_insert_m2m_single(fresh_db): dogs = fresh_db["dogs"] dogs.insert({"id": 1, "name": "Cleo"}, pk="id").m2m( "humans", {"id": 1, "name": "Natalie D"}, pk="id" ) assert {"dogs_humans", "humans", "dogs"} == set(fresh_db.table_names()) humans = fresh_db["humans"] dogs_humans = fresh_db["dogs_humans"] assert [{"id": 1, "name": "Natalie D"}] == list(humans.rows) assert [{"humans_id": 1, "dogs_id": 1}] == list(dogs_humans.rows) def test_insert_m2m_alter(fresh_db): dogs = fresh_db["dogs"] dogs.insert({"id": 1, "name": "Cleo"}, pk="id").m2m( "humans", {"id": 1, "name": "Natalie D"}, pk="id" ) dogs.update(1).m2m( "humans", {"id": 2, "name": "Simon W", "nerd": True}, pk="id", alter=True ) assert list(fresh_db["humans"].rows) == [ {"id": 1, "name": "Natalie D", "nerd": None}, {"id": 2, "name": "Simon W", "nerd": 1}, ] assert list(fresh_db["dogs_humans"].rows) == [ {"humans_id": 1, 
"dogs_id": 1}, {"humans_id": 2, "dogs_id": 1}, ] def test_insert_m2m_list(fresh_db): dogs = fresh_db["dogs"] dogs.insert({"id": 1, "name": "Cleo"}, pk="id").m2m( "humans", [{"id": 1, "name": "Natalie D"}, {"id": 2, "name": "Simon W"}], pk="id", ) assert {"dogs", "humans", "dogs_humans"} == set(fresh_db.table_names()) humans = fresh_db["humans"] dogs_humans = fresh_db["dogs_humans"] assert [{"humans_id": 1, "dogs_id": 1}, {"humans_id": 2, "dogs_id": 1}] == list( dogs_humans.rows ) assert [{"id": 1, "name": "Natalie D"}, {"id": 2, "name": "Simon W"}] == list( humans.rows ) assert [ ForeignKey( table="dogs_humans", column="dogs_id", other_table="dogs", other_column="id" ), ForeignKey( table="dogs_humans", column="humans_id", other_table="humans", other_column="id", ), ] == dogs_humans.foreign_keys def test_insert_m2m_iterable(fresh_db): iterable_records = ({"id": 1, "name": "Phineas"}, {"id": 2, "name": "Ferb"}) def iterable(): for record in iterable_records: yield record platypuses = fresh_db["platypuses"] platypuses.insert({"id": 1, "name": "Perry"}, pk="id").m2m( "humans", iterable(), pk="id", ) assert {"platypuses", "humans", "humans_platypuses"} == set(fresh_db.table_names()) humans = fresh_db["humans"] humans_platypuses = fresh_db["humans_platypuses"] assert [ {"humans_id": 1, "platypuses_id": 1}, {"humans_id": 2, "platypuses_id": 1}, ] == list(humans_platypuses.rows) assert [{"id": 1, "name": "Phineas"}, {"id": 2, "name": "Ferb"}] == list( humans.rows ) assert [ ForeignKey( table="humans_platypuses", column="platypuses_id", other_table="platypuses", other_column="id", ), ForeignKey( table="humans_platypuses", column="humans_id", other_table="humans", other_column="id", ), ] == humans_platypuses.foreign_keys def test_m2m_with_table_objects(fresh_db): dogs = fresh_db.table("dogs", pk="id") humans = fresh_db.table("humans", pk="id") dogs.insert({"id": 1, "name": "Cleo"}).m2m( humans, [{"id": 1, "name": "Natalie D"}, {"id": 2, "name": "Simon W"}] ) expected_tables = {"dogs", "humans", "dogs_humans"} assert expected_tables == set(fresh_db.table_names()) assert dogs.count == 1 assert humans.count == 2 assert fresh_db["dogs_humans"].count == 2 def test_m2m_lookup(fresh_db): people = fresh_db.table("people", pk="id") people.insert({"name": "Wahyu"}).m2m("tags", lookup={"tag": "Coworker"}) people_tags = fresh_db["people_tags"] tags = fresh_db["tags"] assert people_tags.exists() assert tags.exists() assert [ ForeignKey( table="people_tags", column="people_id", other_table="people", other_column="id", ), ForeignKey( table="people_tags", column="tags_id", other_table="tags", other_column="id" ), ] == people_tags.foreign_keys assert [{"people_id": 1, "tags_id": 1}] == list(people_tags.rows) assert [{"id": 1, "name": "Wahyu"}] == list(people.rows) assert [{"id": 1, "tag": "Coworker"}] == list(tags.rows) def test_m2m_requires_either_records_or_lookup(fresh_db): people = fresh_db.table("people", pk="id").insert({"name": "Wahyu"}) with pytest.raises(AssertionError): people.m2m("tags") with pytest.raises(AssertionError): people.m2m("tags", {"tag": "hello"}, lookup={"foo": "bar"}) def test_m2m_explicit_table_name_argument(fresh_db): people = fresh_db.table("people", pk="id") people.insert({"name": "Wahyu"}).m2m( "tags", lookup={"tag": "Coworker"}, m2m_table="tagged" ) assert fresh_db["tags"].exists assert fresh_db["tagged"].exists assert not fresh_db["people_tags"].exists() def test_m2m_table_candidates(fresh_db): fresh_db.create_table("one", {"id": int, "name": str}, pk="id") 
fresh_db.create_table("two", {"id": int, "name": str}, pk="id") fresh_db.create_table("three", {"id": int, "name": str}, pk="id") # No candidates at first assert [] == fresh_db.m2m_table_candidates("one", "two") # Create a candidate fresh_db.create_table( "one_m2m_two", {"one_id": int, "two_id": int}, foreign_keys=["one_id", "two_id"] ) assert ["one_m2m_two"] == fresh_db.m2m_table_candidates("one", "two") # Add another table and there should be two candidates fresh_db.create_table( "one_m2m_two_and_three", {"one_id": int, "two_id": int, "three_id": int}, foreign_keys=["one_id", "two_id", "three_id"], ) assert {"one_m2m_two", "one_m2m_two_and_three"} == set( fresh_db.m2m_table_candidates("one", "two") ) def test_uses_existing_m2m_table_if_exists(fresh_db): # Code should look for an existing table with fks to both tables # and use that if it exists. people = fresh_db.create_table("people", {"id": int, "name": str}, pk="id") fresh_db["tags"].lookup({"tag": "Coworker"}) fresh_db.create_table( "tagged", {"people_id": int, "tags_id": int}, foreign_keys=["people_id", "tags_id"], ) people.insert({"name": "Wahyu"}).m2m("tags", lookup={"tag": "Coworker"}) assert fresh_db["tags"].exists() assert fresh_db["tagged"].exists() assert not fresh_db["people_tags"].exists() assert not fresh_db["tags_people"].exists() assert [{"people_id": 1, "tags_id": 1}] == list(fresh_db["tagged"].rows) def test_requires_explicit_m2m_table_if_multiple_options(fresh_db): # If the code scans for m2m tables and finds more than one candidate # it should require that the m2m_table=x argument is used people = fresh_db.create_table("people", {"id": int, "name": str}, pk="id") fresh_db["tags"].lookup({"tag": "Coworker"}) fresh_db.create_table( "tagged", {"people_id": int, "tags_id": int}, foreign_keys=["people_id", "tags_id"], ) fresh_db.create_table( "tagged2", {"people_id": int, "tags_id": int}, foreign_keys=["people_id", "tags_id"], ) with pytest.raises(NoObviousTable): people.insert({"name": "Wahyu"}).m2m("tags", lookup={"tag": "Coworker"}) sqlite-utils-3.35.2/tests/test_plugins.py000066400000000000000000000044101452131415600205000ustar00rootroot00000000000000from click.testing import CliRunner import click import importlib from sqlite_utils import cli, Database, hookimpl, plugins def test_register_commands(): importlib.reload(cli) assert plugins.get_plugins() == [] class HelloWorldPlugin: __name__ = "HelloWorldPlugin" @hookimpl def register_commands(self, cli): @cli.command(name="hello-world") def hello_world(): "Print hello world" click.echo("Hello world!") try: plugins.pm.register(HelloWorldPlugin(), name="HelloWorldPlugin") importlib.reload(cli) assert plugins.get_plugins() == [ {"name": "HelloWorldPlugin", "hooks": ["register_commands"]} ] runner = CliRunner() result = runner.invoke(cli.cli, ["hello-world"]) assert result.exit_code == 0 assert result.output == "Hello world!\n" finally: plugins.pm.unregister(name="HelloWorldPlugin") importlib.reload(cli) assert plugins.get_plugins() == [] def test_prepare_connection(): importlib.reload(cli) assert plugins.get_plugins() == [] class HelloFunctionPlugin: __name__ = "HelloFunctionPlugin" @hookimpl def prepare_connection(self, conn): conn.create_function("hello", 1, lambda name: f"Hello, {name}!") db = Database(memory=True) def _functions(db): return [ row[0] for row in db.execute( "select distinct name from pragma_function_list order by 1" ).fetchall() ] assert "hello" not in _functions(db) try: plugins.pm.register(HelloFunctionPlugin(), name="HelloFunctionPlugin") assert 
plugins.get_plugins() == [ {"name": "HelloFunctionPlugin", "hooks": ["prepare_connection"]} ] db = Database(memory=True) assert "hello" in _functions(db) result = db.execute('select hello("world")').fetchone()[0] assert result == "Hello, world!" # Test execute_plugins=False db2 = Database(memory=True, execute_plugins=False) assert "hello" not in _functions(db2) finally: plugins.pm.unregister(name="HelloFunctionPlugin") assert plugins.get_plugins() == [] sqlite-utils-3.35.2/tests/test_query.py000066400000000000000000000012161452131415600201650ustar00rootroot00000000000000import types def test_query(fresh_db): fresh_db["dogs"].insert_all([{"name": "Cleo"}, {"name": "Pancakes"}]) results = fresh_db.query("select * from dogs order by name desc") assert isinstance(results, types.GeneratorType) assert list(results) == [{"name": "Pancakes"}, {"name": "Cleo"}] def test_execute_returning_dicts(fresh_db): # Like db.query() but returns a list, included for backwards compatibility # see https://github.com/simonw/sqlite-utils/issues/290 fresh_db["test"].insert({"id": 1, "bar": 2}, pk="id") assert fresh_db.execute_returning_dicts("select * from test") == [ {"id": 1, "bar": 2} ] sqlite-utils-3.35.2/tests/test_recipes.py000066400000000000000000000072621452131415600204610ustar00rootroot00000000000000from sqlite_utils import recipes from sqlite_utils.utils import sqlite3 import json import pytest @pytest.fixture def dates_db(fresh_db): fresh_db["example"].insert_all( [ {"id": 1, "dt": "5th October 2019 12:04"}, {"id": 2, "dt": "6th October 2019 00:05:06"}, {"id": 3, "dt": ""}, {"id": 4, "dt": None}, ], pk="id", ) return fresh_db def test_parsedate(dates_db): dates_db["example"].convert("dt", recipes.parsedate) assert list(dates_db["example"].rows) == [ {"id": 1, "dt": "2019-10-05"}, {"id": 2, "dt": "2019-10-06"}, {"id": 3, "dt": ""}, {"id": 4, "dt": None}, ] def test_parsedatetime(dates_db): dates_db["example"].convert("dt", recipes.parsedatetime) assert list(dates_db["example"].rows) == [ {"id": 1, "dt": "2019-10-05T12:04:00"}, {"id": 2, "dt": "2019-10-06T00:05:06"}, {"id": 3, "dt": ""}, {"id": 4, "dt": None}, ] @pytest.mark.parametrize( "recipe,kwargs,expected", ( ("parsedate", {}, "2005-03-04"), ("parsedate", {"dayfirst": True}, "2005-04-03"), ("parsedatetime", {}, "2005-03-04T00:00:00"), ("parsedatetime", {"dayfirst": True}, "2005-04-03T00:00:00"), ), ) def test_dayfirst_yearfirst(fresh_db, recipe, kwargs, expected): fresh_db["example"].insert_all( [ {"id": 1, "dt": "03/04/05"}, ], pk="id", ) fresh_db["example"].convert( "dt", lambda value: getattr(recipes, recipe)(value, **kwargs) ) assert list(fresh_db["example"].rows) == [ {"id": 1, "dt": expected}, ] @pytest.mark.parametrize("fn", ("parsedate", "parsedatetime")) @pytest.mark.parametrize("errors", (None, recipes.SET_NULL, recipes.IGNORE)) def test_dateparse_errors(fresh_db, fn, errors): fresh_db["example"].insert_all( [ {"id": 1, "dt": "invalid"}, ], pk="id", ) if errors is None: # Should raise an error with pytest.raises(sqlite3.OperationalError): fresh_db["example"].convert("dt", lambda value: getattr(recipes, fn)(value)) else: fresh_db["example"].convert( "dt", lambda value: getattr(recipes, fn)(value, errors=errors) ) rows = list(fresh_db["example"].rows) expected = [{"id": 1, "dt": None if errors is recipes.SET_NULL else "invalid"}] assert rows == expected @pytest.mark.parametrize("delimiter", [None, ";", "-"]) def test_jsonsplit(fresh_db, delimiter): fresh_db["example"].insert_all( [ {"id": 1, "tags": (delimiter or ",").join(["foo", "bar"])}, 
{"id": 2, "tags": (delimiter or ",").join(["bar", "baz"])}, ], pk="id", ) if delimiter is not None: def fn(value): return recipes.jsonsplit(value, delimiter=delimiter) else: fn = recipes.jsonsplit fresh_db["example"].convert("tags", fn) assert list(fresh_db["example"].rows) == [ {"id": 1, "tags": '["foo", "bar"]'}, {"id": 2, "tags": '["bar", "baz"]'}, ] @pytest.mark.parametrize( "type,expected", ( (None, ["1", "2", "3"]), (float, [1.0, 2.0, 3.0]), (int, [1, 2, 3]), ), ) def test_jsonsplit_type(fresh_db, type, expected): fresh_db["example"].insert_all( [ {"id": 1, "records": "1,2,3"}, ], pk="id", ) if type is not None: def fn(value): return recipes.jsonsplit(value, type=type) else: fn = recipes.jsonsplit fresh_db["example"].convert("records", fn) assert json.loads(fresh_db["example"].get(1)["records"]) == expected sqlite-utils-3.35.2/tests/test_recreate.py000066400000000000000000000020551452131415600206140ustar00rootroot00000000000000from sqlite_utils import Database import sqlite3 import pathlib import pytest def test_recreate_ignored_for_in_memory(): # None of these should raise an exception: Database(memory=True, recreate=False) Database(memory=True, recreate=True) Database(":memory:", recreate=False) Database(":memory:", recreate=True) def test_recreate_not_allowed_for_connection(): conn = sqlite3.connect(":memory:") with pytest.raises(AssertionError): Database(conn, recreate=True) @pytest.mark.parametrize( "use_path,create_file_first", [(True, True), (True, False), (False, True), (False, False)], ) def test_recreate(tmp_path, use_path, create_file_first): filepath = str(tmp_path / "data.db") if use_path: filepath = pathlib.Path(filepath) if create_file_first: db = Database(filepath) db["t1"].insert({"foo": "bar"}) assert ["t1"] == db.table_names() db.close() Database(filepath, recreate=True)["t2"].insert({"foo": "bar"}) assert ["t2"] == Database(filepath).table_names() sqlite-utils-3.35.2/tests/test_register_function.py000066400000000000000000000054441452131415600225600ustar00rootroot00000000000000# flake8: noqa import pytest import sys from unittest.mock import MagicMock, call from sqlite_utils.utils import sqlite3 def test_register_function(fresh_db): @fresh_db.register_function def reverse_string(s): return "".join(reversed(list(s))) result = fresh_db.execute('select reverse_string("hello")').fetchone()[0] assert result == "olleh" def test_register_function_custom_name(fresh_db): @fresh_db.register_function(name="revstr") def reverse_string(s): return "".join(reversed(list(s))) result = fresh_db.execute('select revstr("hello")').fetchone()[0] assert result == "olleh" def test_register_function_multiple_arguments(fresh_db): @fresh_db.register_function def a_times_b_plus_c(a, b, c): return a * b + c result = fresh_db.execute("select a_times_b_plus_c(2, 3, 4)").fetchone()[0] assert result == 10 def test_register_function_deterministic(fresh_db): @fresh_db.register_function(deterministic=True) def to_lower(s): return s.lower() result = fresh_db.execute("select to_lower('BOB')").fetchone()[0] assert result == "bob" def test_register_function_deterministic_tries_again_if_exception_raised(fresh_db): fresh_db.conn = MagicMock() fresh_db.conn.create_function = MagicMock() @fresh_db.register_function(deterministic=True) def to_lower_2(s): return s.lower() fresh_db.conn.create_function.assert_called_with( "to_lower_2", 1, to_lower_2, deterministic=True ) first = True def side_effect(*args, **kwargs): # Raise exception only first time this is called nonlocal first if first: first = False raise 
sqlite3.NotSupportedError() # But if sqlite3.NotSupportedError is raised, it tries again fresh_db.conn.create_function.reset_mock() fresh_db.conn.create_function.side_effect = side_effect @fresh_db.register_function(deterministic=True) def to_lower_3(s): return s.lower() # Should have been called once with deterministic=True and once without assert fresh_db.conn.create_function.call_args_list == [ call("to_lower_3", 1, to_lower_3, deterministic=True), call("to_lower_3", 1, to_lower_3), ] def test_register_function_replace(fresh_db): @fresh_db.register_function() def one(): return "one" assert "one" == fresh_db.execute("select one()").fetchone()[0] # This will silently fail to replace the function @fresh_db.register_function() def one(): # noqa return "two" assert "one" == fresh_db.execute("select one()").fetchone()[0] # This will replace it @fresh_db.register_function(replace=True) def one(): # noqa return "two" assert "two" == fresh_db.execute("select one()").fetchone()[0] sqlite-utils-3.35.2/tests/test_rows.py000066400000000000000000000061041452131415600200130ustar00rootroot00000000000000import pytest def test_rows(existing_db): assert [{"text": "one"}, {"text": "two"}, {"text": "three"}] == list( existing_db["foo"].rows ) @pytest.mark.parametrize( "where,where_args,expected_ids", [ ("name = ?", ["Pancakes"], {2}), ("age > ?", [3], {1}), ("age > :age", {"age": 3}, {1}), ("name is not null", [], {1, 2}), ("is_good = ?", [True], {1, 2}), ], ) def test_rows_where(where, where_args, expected_ids, fresh_db): table = fresh_db["dogs"] table.insert_all( [ {"id": 1, "name": "Cleo", "age": 4, "is_good": True}, {"id": 2, "name": "Pancakes", "age": 3, "is_good": True}, ], pk="id", ) assert expected_ids == { r["id"] for r in table.rows_where(where, where_args, select="id") } @pytest.mark.parametrize( "where,order_by,expected_ids", [ (None, None, [1, 2, 3]), (None, "id desc", [3, 2, 1]), (None, "age", [3, 2, 1]), ("id > 1", "age", [3, 2]), ], ) def test_rows_where_order_by(where, order_by, expected_ids, fresh_db): table = fresh_db["dogs"] table.insert_all( [ {"id": 1, "name": "Cleo", "age": 4}, {"id": 2, "name": "Pancakes", "age": 3}, {"id": 3, "name": "Bailey", "age": 2}, ], pk="id", ) assert expected_ids == [r["id"] for r in table.rows_where(where, order_by=order_by)] @pytest.mark.parametrize( "offset,limit,expected", [ (None, 3, [1, 2, 3]), (0, 3, [1, 2, 3]), (3, 3, [4, 5, 6]), ], ) def test_rows_where_offset_limit(fresh_db, offset, limit, expected): table = fresh_db["rows"] table.insert_all([{"id": id} for id in range(1, 101)], pk="id") assert table.count == 100 assert expected == [ r["id"] for r in table.rows_where(offset=offset, limit=limit, order_by="id") ] def test_pks_and_rows_where_rowid(fresh_db): table = fresh_db["rowid_table"] table.insert_all({"number": i + 10} for i in range(3)) pks_and_rows = list(table.pks_and_rows_where()) assert pks_and_rows == [ (1, {"rowid": 1, "number": 10}), (2, {"rowid": 2, "number": 11}), (3, {"rowid": 3, "number": 12}), ] def test_pks_and_rows_where_simple_pk(fresh_db): table = fresh_db["simple_pk_table"] table.insert_all(({"id": i + 10} for i in range(3)), pk="id") pks_and_rows = list(table.pks_and_rows_where()) assert pks_and_rows == [ (10, {"id": 10}), (11, {"id": 11}), (12, {"id": 12}), ] def test_pks_and_rows_where_compound_pk(fresh_db): table = fresh_db["compound_pk_table"] table.insert_all( ({"type": "number", "number": i, "plusone": i + 1} for i in range(3)), pk=("type", "number"), ) pks_and_rows = list(table.pks_and_rows_where()) assert pks_and_rows
== [ (("number", 0), {"type": "number", "number": 0, "plusone": 1}), (("number", 1), {"type": "number", "number": 1, "plusone": 2}), (("number", 2), {"type": "number", "number": 2, "plusone": 3}), ] sqlite-utils-3.35.2/tests/test_rows_from_file.py000066400000000000000000000031731452131415600220400ustar00rootroot00000000000000from sqlite_utils.utils import rows_from_file, Format, RowError from io import BytesIO, StringIO import pytest @pytest.mark.parametrize( "input,expected_format", ( (b"id,name\n1,Cleo", Format.CSV), (b"id\tname\n1\tCleo", Format.TSV), (b'[{"id": "1", "name": "Cleo"}]', Format.JSON), ), ) def test_rows_from_file_detect_format(input, expected_format): rows, format = rows_from_file(BytesIO(input)) assert format == expected_format rows_list = list(rows) assert rows_list == [{"id": "1", "name": "Cleo"}] @pytest.mark.parametrize( "ignore_extras,extras_key,expected", ( (True, None, [{"id": "1", "name": "Cleo"}]), (False, "_rest", [{"id": "1", "name": "Cleo", "_rest": ["oops"]}]), # expected of None means expect an error: (False, False, None), ), ) def test_rows_from_file_extra_fields_strategies(ignore_extras, extras_key, expected): try: rows, format = rows_from_file( BytesIO(b"id,name\r\n1,Cleo,oops"), format=Format.CSV, ignore_extras=ignore_extras, extras_key=extras_key, ) list_rows = list(rows) except RowError: if expected is None: # This is fine, return else: # We did not expect an error raise assert list_rows == expected def test_rows_from_file_error_on_string_io(): with pytest.raises(TypeError) as ex: rows_from_file(StringIO("id,name\r\n1,Cleo")) assert ex.value.args == ( "rows_from_file() requires a file-like object that supports peek(), such as io.BytesIO", ) sqlite-utils-3.35.2/tests/test_sniff.py000066400000000000000000000015701452131415600201300ustar00rootroot00000000000000from sqlite_utils import cli, Database from click.testing import CliRunner import pathlib import pytest sniff_dir = pathlib.Path(__file__).parent / "sniff" @pytest.mark.parametrize("filepath", sniff_dir.glob("example*")) def test_sniff(tmpdir, filepath): db_path = str(tmpdir / "test.db") runner = CliRunner() result = runner.invoke( cli.cli, ["insert", db_path, "creatures", str(filepath), "--sniff"], catch_exceptions=False, ) assert result.exit_code == 0, result.stdout db = Database(db_path) assert list(db["creatures"].rows) == [ {"id": "1", "species": "dog", "name": "Cleo", "age": "5"}, {"id": "2", "species": "dog", "name": "Pancakes", "age": "4"}, {"id": "3", "species": "cat", "name": "Mozie", "age": "8"}, {"id": "4", "species": "spider", "name": "Daisy, the tarantula", "age": "6"}, ] sqlite-utils-3.35.2/tests/test_suggest_column_types.py000066400000000000000000000017351452131415600233100ustar00rootroot00000000000000import pytest from collections import OrderedDict from sqlite_utils.utils import suggest_column_types @pytest.mark.parametrize( "records,types", [ ([{"a": 1}], {"a": int}), ([{"a": 1}, {"a": None}], {"a": int}), ([{"a": "baz"}], {"a": str}), ([{"a": "baz"}, {"a": None}], {"a": str}), ([{"a": 1.2}], {"a": float}), ([{"a": 1.2}, {"a": None}], {"a": float}), ([{"a": [1]}], {"a": str}), ([{"a": [1]}, {"a": None}], {"a": str}), ([{"a": (1,)}], {"a": str}), ([{"a": {"b": 1}}], {"a": str}), ([{"a": {"b": 1}}, {"a": None}], {"a": str}), ([{"a": OrderedDict({"b": 1})}], {"a": str}), ([{"a": 1}, {"a": 1.1}], {"a": float}), ([{"a": b"b"}], {"a": bytes}), ([{"a": b"b"}, {"a": None}], {"a": bytes}), ([{"a": "a", "b": None}], {"a": str, "b": str}), ], ) def test_suggest_column_types(records, types): 
assert types == suggest_column_types(records) sqlite-utils-3.35.2/tests/test_tracer.py000066400000000000000000000065261452131415600203110ustar00rootroot00000000000000from sqlite_utils import Database def test_tracer(): collected = [] db = Database( memory=True, tracer=lambda sql, params: collected.append((sql, params)) ) db["dogs"].insert({"name": "Cleopaws"}) db["dogs"].enable_fts(["name"]) db["dogs"].search("Cleopaws") assert collected == [ ("PRAGMA recursive_triggers=on;", None), ("select name from sqlite_master where type = 'view'", None), ("select name from sqlite_master where type = 'table'", None), ("select name from sqlite_master where type = 'view'", None), ("select name from sqlite_master where type = 'table'", None), ("select name from sqlite_master where type = 'view'", None), ("CREATE TABLE [dogs] (\n [name] TEXT\n);\n ", None), ("select name from sqlite_master where type = 'view'", None), ("INSERT INTO [dogs] ([name]) VALUES (?);", ["Cleopaws"]), ("select name from sqlite_master where type = 'view'", None), ( "CREATE VIRTUAL TABLE [dogs_fts] USING FTS5 (\n [name],\n content=[dogs]\n)", None, ), ( "INSERT INTO [dogs_fts] (rowid, [name])\n SELECT rowid, [name] FROM [dogs];", None, ), ("select name from sqlite_master where type = 'view'", None), ] def test_with_tracer(): collected = [] def tracer(sql, params): return collected.append((sql, params)) db = Database(memory=True) db["dogs"].insert({"name": "Cleopaws"}) db["dogs"].enable_fts(["name"]) assert len(collected) == 0 with db.tracer(tracer): list(db["dogs"].search("Cleopaws")) assert len(collected) == 5 assert collected == [ ("select name from sqlite_master where type = 'view'", None), ( ( "SELECT name FROM sqlite_master\n" " WHERE rootpage = 0\n" " AND (\n" " sql LIKE :like\n" " OR sql LIKE :like2\n" " OR (\n" " tbl_name = :table\n" " AND sql LIKE '%VIRTUAL TABLE%USING FTS%'\n" " )\n" " )", { "like": "%VIRTUAL TABLE%USING FTS%content=[dogs]%", "like2": '%VIRTUAL TABLE%USING FTS%content="dogs"%', "table": "dogs", }, ) ), ("select name from sqlite_master where type = 'view'", None), ("select sql from sqlite_master where name = ?", ("dogs_fts",)), ( ( "with original as (\n" " select\n" " rowid,\n" " *\n" " from [dogs]\n" ")\n" "select\n" " [original].*\n" "from\n" " [original]\n" " join [dogs_fts] on [original].rowid = [dogs_fts].rowid\n" "where\n" " [dogs_fts] match :query\n" "order by\n" " [dogs_fts].rank" ), {"query": "Cleopaws"}, ), ] # Outside the with block collected should not be appended to db["dogs"].insert({"name": "Cleopaws"}) assert len(collected) == 5 sqlite-utils-3.35.2/tests/test_transform.py000066400000000000000000000465161452131415600210470ustar00rootroot00000000000000from sqlite_utils.db import ForeignKey from sqlite_utils.utils import OperationalError import pytest @pytest.mark.parametrize( "params,expected_sql", [ # Identity transform - nothing changes ( {}, [ "CREATE TABLE [dogs_new_suffix] (\n [id] INTEGER PRIMARY KEY,\n [name] TEXT,\n [age] TEXT\n);", "INSERT INTO [dogs_new_suffix] ([rowid], [id], [name], [age])\n SELECT [rowid], [id], [name], [age] FROM [dogs];", "DROP TABLE [dogs];", "ALTER TABLE [dogs_new_suffix] RENAME TO [dogs];", ], ), # Change column type ( {"types": {"age": int}}, [ "CREATE TABLE [dogs_new_suffix] (\n [id] INTEGER PRIMARY KEY,\n [name] TEXT,\n [age] INTEGER\n);", "INSERT INTO [dogs_new_suffix] ([rowid], [id], [name], [age])\n SELECT [rowid], [id], [name], [age] FROM [dogs];", "DROP TABLE [dogs];", "ALTER TABLE [dogs_new_suffix] RENAME TO [dogs];", ], ), # Rename a column ( 
{"rename": {"age": "dog_age"}}, [ "CREATE TABLE [dogs_new_suffix] (\n [id] INTEGER PRIMARY KEY,\n [name] TEXT,\n [dog_age] TEXT\n);", "INSERT INTO [dogs_new_suffix] ([rowid], [id], [name], [dog_age])\n SELECT [rowid], [id], [name], [age] FROM [dogs];", "DROP TABLE [dogs];", "ALTER TABLE [dogs_new_suffix] RENAME TO [dogs];", ], ), # Drop a column ( {"drop": ["age"]}, [ "CREATE TABLE [dogs_new_suffix] (\n [id] INTEGER PRIMARY KEY,\n [name] TEXT\n);", "INSERT INTO [dogs_new_suffix] ([rowid], [id], [name])\n SELECT [rowid], [id], [name] FROM [dogs];", "DROP TABLE [dogs];", "ALTER TABLE [dogs_new_suffix] RENAME TO [dogs];", ], ), # Convert type AND rename column ( {"types": {"age": int}, "rename": {"age": "dog_age"}}, [ "CREATE TABLE [dogs_new_suffix] (\n [id] INTEGER PRIMARY KEY,\n [name] TEXT,\n [dog_age] INTEGER\n);", "INSERT INTO [dogs_new_suffix] ([rowid], [id], [name], [dog_age])\n SELECT [rowid], [id], [name], [age] FROM [dogs];", "DROP TABLE [dogs];", "ALTER TABLE [dogs_new_suffix] RENAME TO [dogs];", ], ), # Change primary key ( {"pk": "age"}, [ "CREATE TABLE [dogs_new_suffix] (\n [id] INTEGER,\n [name] TEXT,\n [age] TEXT PRIMARY KEY\n);", "INSERT INTO [dogs_new_suffix] ([rowid], [id], [name], [age])\n SELECT [rowid], [id], [name], [age] FROM [dogs];", "DROP TABLE [dogs];", "ALTER TABLE [dogs_new_suffix] RENAME TO [dogs];", ], ), # Change primary key to a compound pk ( {"pk": ("age", "name")}, [ "CREATE TABLE [dogs_new_suffix] (\n [id] INTEGER,\n [name] TEXT,\n [age] TEXT,\n PRIMARY KEY ([age], [name])\n);", "INSERT INTO [dogs_new_suffix] ([rowid], [id], [name], [age])\n SELECT [rowid], [id], [name], [age] FROM [dogs];", "DROP TABLE [dogs];", "ALTER TABLE [dogs_new_suffix] RENAME TO [dogs];", ], ), # Remove primary key, creating a rowid table ( {"pk": None}, [ "CREATE TABLE [dogs_new_suffix] (\n [id] INTEGER,\n [name] TEXT,\n [age] TEXT\n);", "INSERT INTO [dogs_new_suffix] ([rowid], [id], [name], [age])\n SELECT [rowid], [id], [name], [age] FROM [dogs];", "DROP TABLE [dogs];", "ALTER TABLE [dogs_new_suffix] RENAME TO [dogs];", ], ), # Keeping the table ( {"drop": ["age"], "keep_table": "kept_table"}, [ "CREATE TABLE [dogs_new_suffix] (\n [id] INTEGER PRIMARY KEY,\n [name] TEXT\n);", "INSERT INTO [dogs_new_suffix] ([rowid], [id], [name])\n SELECT [rowid], [id], [name] FROM [dogs];", "ALTER TABLE [dogs] RENAME TO [kept_table];", "ALTER TABLE [dogs_new_suffix] RENAME TO [dogs];", ], ), ], ) @pytest.mark.parametrize("use_pragma_foreign_keys", [False, True]) def test_transform_sql_table_with_primary_key( fresh_db, params, expected_sql, use_pragma_foreign_keys ): captured = [] def tracer(sql, params): return captured.append((sql, params)) dogs = fresh_db["dogs"] if use_pragma_foreign_keys: fresh_db.conn.execute("PRAGMA foreign_keys=ON") dogs.insert({"id": 1, "name": "Cleo", "age": "5"}, pk="id") sql = dogs.transform_sql(**{**params, **{"tmp_suffix": "suffix"}}) assert sql == expected_sql # Check that .transform() runs without exceptions: with fresh_db.tracer(tracer): dogs.transform(**params) # If use_pragma_foreign_keys, check that we did the right thing if use_pragma_foreign_keys: assert ("PRAGMA foreign_keys=0;", None) in captured assert captured[-2] == ("PRAGMA foreign_key_check;", None) assert captured[-1] == ("PRAGMA foreign_keys=1;", None) else: assert ("PRAGMA foreign_keys=0;", None) not in captured assert ("PRAGMA foreign_keys=1;", None) not in captured @pytest.mark.parametrize( "params,expected_sql", [ # Identity transform - nothing changes ( {}, [ "CREATE TABLE [dogs_new_suffix] 
(\n [id] INTEGER,\n [name] TEXT,\n [age] TEXT\n);", "INSERT INTO [dogs_new_suffix] ([rowid], [id], [name], [age])\n SELECT [rowid], [id], [name], [age] FROM [dogs];", "DROP TABLE [dogs];", "ALTER TABLE [dogs_new_suffix] RENAME TO [dogs];", ], ), # Change column type ( {"types": {"age": int}}, [ "CREATE TABLE [dogs_new_suffix] (\n [id] INTEGER,\n [name] TEXT,\n [age] INTEGER\n);", "INSERT INTO [dogs_new_suffix] ([rowid], [id], [name], [age])\n SELECT [rowid], [id], [name], [age] FROM [dogs];", "DROP TABLE [dogs];", "ALTER TABLE [dogs_new_suffix] RENAME TO [dogs];", ], ), # Rename a column ( {"rename": {"age": "dog_age"}}, [ "CREATE TABLE [dogs_new_suffix] (\n [id] INTEGER,\n [name] TEXT,\n [dog_age] TEXT\n);", "INSERT INTO [dogs_new_suffix] ([rowid], [id], [name], [dog_age])\n SELECT [rowid], [id], [name], [age] FROM [dogs];", "DROP TABLE [dogs];", "ALTER TABLE [dogs_new_suffix] RENAME TO [dogs];", ], ), # Make ID a primary key ( {"pk": "id"}, [ "CREATE TABLE [dogs_new_suffix] (\n [id] INTEGER PRIMARY KEY,\n [name] TEXT,\n [age] TEXT\n);", "INSERT INTO [dogs_new_suffix] ([rowid], [id], [name], [age])\n SELECT [rowid], [id], [name], [age] FROM [dogs];", "DROP TABLE [dogs];", "ALTER TABLE [dogs_new_suffix] RENAME TO [dogs];", ], ), ], ) @pytest.mark.parametrize("use_pragma_foreign_keys", [False, True]) def test_transform_sql_table_with_no_primary_key( fresh_db, params, expected_sql, use_pragma_foreign_keys ): captured = [] def tracer(sql, params): return captured.append((sql, params)) dogs = fresh_db["dogs"] if use_pragma_foreign_keys: fresh_db.conn.execute("PRAGMA foreign_keys=ON") dogs.insert({"id": 1, "name": "Cleo", "age": "5"}) sql = dogs.transform_sql(**{**params, **{"tmp_suffix": "suffix"}}) assert sql == expected_sql # Check that .transform() runs without exceptions: with fresh_db.tracer(tracer): dogs.transform(**params) # If use_pragma_foreign_keys, check that we did the right thing if use_pragma_foreign_keys: assert ("PRAGMA foreign_keys=0;", None) in captured assert captured[-2] == ("PRAGMA foreign_key_check;", None) assert captured[-1] == ("PRAGMA foreign_keys=1;", None) else: assert ("PRAGMA foreign_keys=0;", None) not in captured assert ("PRAGMA foreign_keys=1;", None) not in captured def test_transform_sql_with_no_primary_key_to_primary_key_of_id(fresh_db): dogs = fresh_db["dogs"] dogs.insert({"id": 1, "name": "Cleo", "age": "5"}) assert ( dogs.schema == "CREATE TABLE [dogs] (\n [id] INTEGER,\n [name] TEXT,\n [age] TEXT\n)" ) dogs.transform(pk="id") # Slight oddity: [dogs] becomes "dogs" during the rename: assert ( dogs.schema == 'CREATE TABLE "dogs" (\n [id] INTEGER PRIMARY KEY,\n [name] TEXT,\n [age] TEXT\n)' ) def test_transform_rename_pk(fresh_db): dogs = fresh_db["dogs"] dogs.insert({"id": 1, "name": "Cleo", "age": "5"}, pk="id") dogs.transform(rename={"id": "pk"}) assert ( dogs.schema == 'CREATE TABLE "dogs" (\n [pk] INTEGER PRIMARY KEY,\n [name] TEXT,\n [age] TEXT\n)' ) def test_transform_not_null(fresh_db): dogs = fresh_db["dogs"] dogs.insert({"id": 1, "name": "Cleo", "age": "5"}, pk="id") dogs.transform(not_null={"name"}) assert ( dogs.schema == 'CREATE TABLE "dogs" (\n [id] INTEGER PRIMARY KEY,\n [name] TEXT NOT NULL,\n [age] TEXT\n)' ) def test_transform_remove_a_not_null(fresh_db): dogs = fresh_db["dogs"] dogs.insert({"id": 1, "name": "Cleo", "age": "5"}, not_null={"age"}, pk="id") dogs.transform(not_null={"name": True, "age": False}) assert ( dogs.schema == 'CREATE TABLE "dogs" (\n [id] INTEGER PRIMARY KEY,\n [name] TEXT NOT NULL,\n [age] TEXT\n)' ) 
@pytest.mark.parametrize("not_null", [{"age"}, {"age": True}]) def test_transform_add_not_null_with_rename(fresh_db, not_null): dogs = fresh_db["dogs"] dogs.insert({"id": 1, "name": "Cleo", "age": "5"}, pk="id") dogs.transform(not_null=not_null, rename={"age": "dog_age"}) assert ( dogs.schema == 'CREATE TABLE "dogs" (\n [id] INTEGER PRIMARY KEY,\n [name] TEXT,\n [dog_age] TEXT NOT NULL\n)' ) def test_transform_defaults(fresh_db): dogs = fresh_db["dogs"] dogs.insert({"id": 1, "name": "Cleo", "age": 5}, pk="id") dogs.transform(defaults={"age": 1}) assert ( dogs.schema == 'CREATE TABLE "dogs" (\n [id] INTEGER PRIMARY KEY,\n [name] TEXT,\n [age] INTEGER DEFAULT 1\n)' ) def test_transform_defaults_and_rename_column(fresh_db): dogs = fresh_db["dogs"] dogs.insert({"id": 1, "name": "Cleo", "age": 5}, pk="id") dogs.transform(rename={"age": "dog_age"}, defaults={"age": 1}) assert ( dogs.schema == 'CREATE TABLE "dogs" (\n [id] INTEGER PRIMARY KEY,\n [name] TEXT,\n [dog_age] INTEGER DEFAULT 1\n)' ) def test_remove_defaults(fresh_db): dogs = fresh_db["dogs"] dogs.insert({"id": 1, "name": "Cleo", "age": 5}, defaults={"age": 1}, pk="id") dogs.transform(defaults={"age": None}) assert ( dogs.schema == 'CREATE TABLE "dogs" (\n [id] INTEGER PRIMARY KEY,\n [name] TEXT,\n [age] INTEGER\n)' ) @pytest.fixture def authors_db(fresh_db): books = fresh_db["books"] authors = fresh_db["authors"] authors.insert({"id": 5, "name": "Jane McGonical"}, pk="id") books.insert( {"id": 2, "title": "Reality is Broken", "author_id": 5}, foreign_keys=("author_id",), pk="id", ) return fresh_db def test_transform_foreign_keys_persist(authors_db): assert authors_db["books"].foreign_keys == [ ForeignKey( table="books", column="author_id", other_table="authors", other_column="id" ) ] authors_db["books"].transform(rename={"title": "book_title"}) assert authors_db["books"].foreign_keys == [ ForeignKey( table="books", column="author_id", other_table="authors", other_column="id" ) ] @pytest.mark.parametrize("use_pragma_foreign_keys", [False, True]) def test_transform_foreign_keys_survive_renamed_column( authors_db, use_pragma_foreign_keys ): if use_pragma_foreign_keys: authors_db.conn.execute("PRAGMA foreign_keys=ON") authors_db["books"].transform(rename={"author_id": "author_id_2"}) assert authors_db["books"].foreign_keys == [ ForeignKey( table="books", column="author_id_2", other_table="authors", other_column="id", ) ] def _add_country_city_continent(db): db["country"].insert({"id": 1, "name": "France"}, pk="id") db["continent"].insert({"id": 2, "name": "Europe"}, pk="id") db["city"].insert({"id": 24, "name": "Paris"}, pk="id") _CAVEAU = { "id": 32, "name": "Caveau de la Huchette", "country": 1, "continent": 2, "city": 24, } @pytest.mark.parametrize("use_pragma_foreign_keys", [False, True]) def test_transform_drop_foreign_keys(fresh_db, use_pragma_foreign_keys): if use_pragma_foreign_keys: fresh_db.conn.execute("PRAGMA foreign_keys=ON") # Create table with three foreign keys so we can drop two of them _add_country_city_continent(fresh_db) fresh_db["places"].insert( _CAVEAU, foreign_keys=("country", "continent", "city"), ) assert fresh_db["places"].foreign_keys == [ ForeignKey( table="places", column="city", other_table="city", other_column="id" ), ForeignKey( table="places", column="continent", other_table="continent", other_column="id", ), ForeignKey( table="places", column="country", other_table="country", other_column="id" ), ] # Drop two of those foreign keys fresh_db["places"].transform(drop_foreign_keys=("country", "continent")) # 
Should be only one foreign key now assert fresh_db["places"].foreign_keys == [ ForeignKey(table="places", column="city", other_table="city", other_column="id") ] if use_pragma_foreign_keys: assert fresh_db.conn.execute("PRAGMA foreign_keys").fetchone()[0] def test_transform_verify_foreign_keys(fresh_db): fresh_db.conn.execute("PRAGMA foreign_keys=ON") fresh_db["authors"].insert({"id": 3, "name": "Tina"}, pk="id") fresh_db["books"].insert( {"id": 1, "title": "Book", "author_id": 3}, pk="id", foreign_keys={"author_id"} ) # Renaming the id column on authors should break everything with pytest.raises(OperationalError) as e: fresh_db["authors"].transform(rename={"id": "id2"}) assert e.value.args[0] == 'foreign key mismatch - "books" referencing "authors"' # This should have rolled us back assert ( fresh_db["authors"].schema == "CREATE TABLE [authors] (\n [id] INTEGER PRIMARY KEY,\n [name] TEXT\n)" ) assert fresh_db.conn.execute("PRAGMA foreign_keys").fetchone()[0] def test_transform_add_foreign_keys_from_scratch(fresh_db): _add_country_city_continent(fresh_db) fresh_db["places"].insert(_CAVEAU) # Should have no foreign keys assert fresh_db["places"].foreign_keys == [] # Now add them using .transform() fresh_db["places"].transform(add_foreign_keys=("country", "continent", "city")) # Should now have all three: assert fresh_db["places"].foreign_keys == [ ForeignKey( table="places", column="city", other_table="city", other_column="id" ), ForeignKey( table="places", column="continent", other_table="continent", other_column="id", ), ForeignKey( table="places", column="country", other_table="country", other_column="id" ), ] assert fresh_db["places"].schema == ( 'CREATE TABLE "places" (\n' " [id] INTEGER,\n" " [name] TEXT,\n" " [country] INTEGER REFERENCES [country]([id]),\n" " [continent] INTEGER REFERENCES [continent]([id]),\n" " [city] INTEGER REFERENCES [city]([id])\n" ")" ) @pytest.mark.parametrize( "add_foreign_keys", ( ("country", "continent"), # Fully specified ( ("country", "country", "id"), ("continent", "continent", "id"), ), ), ) def test_transform_add_foreign_keys_from_partial(fresh_db, add_foreign_keys): _add_country_city_continent(fresh_db) fresh_db["places"].insert( _CAVEAU, foreign_keys=("city",), ) # Should have one foreign keys assert fresh_db["places"].foreign_keys == [ ForeignKey(table="places", column="city", other_table="city", other_column="id") ] # Now add three more using .transform() fresh_db["places"].transform(add_foreign_keys=add_foreign_keys) # Should now have all three: assert fresh_db["places"].foreign_keys == [ ForeignKey( table="places", column="city", other_table="city", other_column="id" ), ForeignKey( table="places", column="continent", other_table="continent", other_column="id", ), ForeignKey( table="places", column="country", other_table="country", other_column="id" ), ] @pytest.mark.parametrize( "foreign_keys", ( ("country", "continent"), # Fully specified ( ("country", "country", "id"), ("continent", "continent", "id"), ), ), ) def test_transform_replace_foreign_keys(fresh_db, foreign_keys): _add_country_city_continent(fresh_db) fresh_db["places"].insert( _CAVEAU, foreign_keys=("city",), ) assert len(fresh_db["places"].foreign_keys) == 1 # Replace with two different ones fresh_db["places"].transform(foreign_keys=foreign_keys) assert fresh_db["places"].schema == ( 'CREATE TABLE "places" (\n' " [id] INTEGER,\n" " [name] TEXT,\n" " [country] INTEGER REFERENCES [country]([id]),\n" " [continent] INTEGER REFERENCES [continent]([id]),\n" " [city] INTEGER\n" ")" ) 
@pytest.mark.parametrize("table_type", ("id_pk", "rowid", "compound_pk")) def test_transform_preserves_rowids(fresh_db, table_type): pk = None if table_type == "id_pk": pk = "id" elif table_type == "compound_pk": pk = ("id", "name") elif table_type == "rowid": pk = None fresh_db["places"].insert_all( [ {"id": "1", "name": "Paris", "country": "France"}, {"id": "2", "name": "London", "country": "UK"}, {"id": "3", "name": "New York", "country": "USA"}, ], pk=pk, ) # Now delete and insert a row to mix up the `rowid` sequence fresh_db["places"].delete_where("id = ?", ["2"]) fresh_db["places"].insert({"id": "4", "name": "London", "country": "UK"}) previous_rows = list( tuple(row) for row in fresh_db.execute("select rowid, id, name from places") ) # Transform it fresh_db["places"].transform(column_order=("country", "name")) # Should be the same next_rows = list( tuple(row) for row in fresh_db.execute("select rowid, id, name from places") ) assert previous_rows == next_rows sqlite-utils-3.35.2/tests/test_update.py000066400000000000000000000062511452131415600203060ustar00rootroot00000000000000import collections import json import pytest from sqlite_utils.db import NotFoundError def test_update_rowid_table(fresh_db): table = fresh_db["table"] rowid = table.insert({"foo": "bar"}).last_pk table.update(rowid, {"foo": "baz"}) assert [{"foo": "baz"}] == list(table.rows) def test_update_pk_table(fresh_db): table = fresh_db["table"] pk = table.insert({"foo": "bar", "id": 5}, pk="id").last_pk assert 5 == pk table.update(pk, {"foo": "baz"}) assert [{"id": 5, "foo": "baz"}] == list(table.rows) def test_update_compound_pk_table(fresh_db): table = fresh_db["table"] pk = table.insert({"id1": 5, "id2": 3, "v": 1}, pk=("id1", "id2")).last_pk assert (5, 3) == pk table.update(pk, {"v": 2}) assert [{"id1": 5, "id2": 3, "v": 2}] == list(table.rows) @pytest.mark.parametrize( "pk,update_pk", ( (None, 2), (None, None), ("id1", None), ("id1", 4), (("id1", "id2"), None), (("id1", "id2"), 4), (("id1", "id2"), (4, 5)), ), ) def test_update_invalid_pk(fresh_db, pk, update_pk): table = fresh_db["table"] table.insert({"id1": 5, "id2": 3, "v": 1}, pk=pk).last_pk with pytest.raises(NotFoundError): table.update(update_pk, {"v": 2}) def test_update_alter(fresh_db): table = fresh_db["table"] rowid = table.insert({"foo": "bar"}).last_pk table.update(rowid, {"new_col": 1.2}, alter=True) assert [{"foo": "bar", "new_col": 1.2}] == list(table.rows) # Let's try adding three cols at once table.update( rowid, {"str_col": "str", "bytes_col": b"\xa0 has bytes", "int_col": -10}, alter=True, ) assert [ { "foo": "bar", "new_col": 1.2, "str_col": "str", "bytes_col": b"\xa0 has bytes", "int_col": -10, } ] == list(table.rows) def test_update_alter_with_invalid_column_characters(fresh_db): table = fresh_db["table"] rowid = table.insert({"foo": "bar"}).last_pk with pytest.raises(AssertionError): table.update(rowid, {"new_col[abc]": 1.2}, alter=True) def test_update_with_no_values_sets_last_pk(fresh_db): table = fresh_db.table("dogs", pk="id") table.insert_all([{"id": 1, "name": "Cleo"}, {"id": 2, "name": "Pancakes"}]) table.update(1) assert table.last_pk == 1 table.update(2) assert table.last_pk == 2 with pytest.raises(NotFoundError): table.update(3) @pytest.mark.parametrize( "data_structure", ( ["list with one item"], ["list with", "two items"], {"dictionary": "simple"}, {"dictionary": {"nested": "complex"}}, collections.OrderedDict( [ ("key1", {"nested": "complex"}), ("key2", "foo"), ] ), [{"list": "of"}, {"two": "dicts"}], ), ) def 
test_update_dictionaries_and_lists_as_json(fresh_db, data_structure): fresh_db["test"].insert({"id": 1, "data": ""}, pk="id") fresh_db["test"].update(1, {"data": data_structure}) row = fresh_db.execute("select id, data from test").fetchone() assert row[0] == 1 assert data_structure == json.loads(row[1]) sqlite-utils-3.35.2/tests/test_upsert.py000066400000000000000000000064501452131415600203470ustar00rootroot00000000000000from sqlite_utils.db import PrimaryKeyRequired import pytest def test_upsert(fresh_db): table = fresh_db["table"] table.insert({"id": 1, "name": "Cleo"}, pk="id") table.upsert({"id": 1, "age": 5}, pk="id", alter=True) assert list(table.rows) == [{"id": 1, "name": "Cleo", "age": 5}] assert table.last_pk == 1 def test_upsert_all(fresh_db): table = fresh_db["table"] table.upsert_all([{"id": 1, "name": "Cleo"}, {"id": 2, "name": "Nixie"}], pk="id") table.upsert_all([{"id": 1, "age": 5}, {"id": 2, "age": 5}], pk="id", alter=True) assert list(table.rows) == [ {"id": 1, "name": "Cleo", "age": 5}, {"id": 2, "name": "Nixie", "age": 5}, ] assert table.last_pk is None def test_upsert_all_single_column(fresh_db): table = fresh_db["table"] table.upsert_all([{"name": "Cleo"}], pk="name") assert list(table.rows) == [{"name": "Cleo"}] assert table.pks == ["name"] def test_upsert_all_not_null(fresh_db): # https://github.com/simonw/sqlite-utils/issues/538 fresh_db["comments"].upsert_all( [{"id": 1, "name": "Cleo"}], pk="id", not_null=["name"], ) assert list(fresh_db["comments"].rows) == [{"id": 1, "name": "Cleo"}] def test_upsert_error_if_no_pk(fresh_db): table = fresh_db["table"] with pytest.raises(PrimaryKeyRequired): table.upsert_all([{"id": 1, "name": "Cleo"}]) with pytest.raises(PrimaryKeyRequired): table.upsert({"id": 1, "name": "Cleo"}) def test_upsert_with_hash_id(fresh_db): table = fresh_db["table"] table.upsert({"foo": "bar"}, hash_id="pk") assert [{"pk": "a5e744d0164540d33b1d7ea616c28f2fa97e754a", "foo": "bar"}] == list( table.rows ) assert "a5e744d0164540d33b1d7ea616c28f2fa97e754a" == table.last_pk @pytest.mark.parametrize("hash_id", (None, "custom_id")) def test_upsert_with_hash_id_columns(fresh_db, hash_id): table = fresh_db["table"] table.upsert({"a": 1, "b": 2, "c": 3}, hash_id=hash_id, hash_id_columns=("a", "b")) assert list(table.rows) == [ { hash_id or "id": "4acc71e0547112eb432f0a36fb1924c4a738cb49", "a": 1, "b": 2, "c": 3, } ] assert table.last_pk == "4acc71e0547112eb432f0a36fb1924c4a738cb49" table.upsert({"a": 1, "b": 2, "c": 4}, hash_id=hash_id, hash_id_columns=("a", "b")) assert list(table.rows) == [ { hash_id or "id": "4acc71e0547112eb432f0a36fb1924c4a738cb49", "a": 1, "b": 2, "c": 4, } ] def test_upsert_compound_primary_key(fresh_db): table = fresh_db["table"] table.upsert_all( [ {"species": "dog", "id": 1, "name": "Cleo", "age": 4}, {"species": "cat", "id": 1, "name": "Catbag"}, ], pk=("species", "id"), ) assert table.last_pk is None table.upsert({"species": "dog", "id": 1, "age": 5}, pk=("species", "id")) assert ("dog", 1) == table.last_pk assert [ {"species": "dog", "id": 1, "name": "Cleo", "age": 5}, {"species": "cat", "id": 1, "name": "Catbag", "age": None}, ] == list(table.rows) # .upsert_all() with a single item should set .last_pk table.upsert_all([{"species": "cat", "id": 1, "age": 5}], pk=("species", "id")) assert ("cat", 1) == table.last_pk sqlite-utils-3.35.2/tests/test_utils.py000066400000000000000000000047571452131415600201750ustar00rootroot00000000000000from sqlite_utils import utils import csv import io import pytest @pytest.mark.parametrize( 
"input,expected,should_be_is", [ ({}, None, True), ({"foo": "bar"}, None, True), ( {"content": {"$base64": True, "encoded": "aGVsbG8="}}, {"content": b"hello"}, False, ), ], ) def test_decode_base64_values(input, expected, should_be_is): actual = utils.decode_base64_values(input) if should_be_is: assert actual is input else: assert actual == expected @pytest.mark.parametrize( "size,expected", ( (1, [["a"], ["b"], ["c"], ["d"]]), (2, [["a", "b"], ["c", "d"]]), (3, [["a", "b", "c"], ["d"]]), (4, [["a", "b", "c", "d"]]), ), ) def test_chunks(size, expected): input = ["a", "b", "c", "d"] chunks = list(map(list, utils.chunks(input, size))) assert chunks == expected def test_hash_record(): expected = "d383e7c0ba88f5ffcdd09be660de164b3847401a" assert utils.hash_record({"name": "Cleo", "twitter": "CleoPaws"}) == expected assert ( utils.hash_record( {"name": "Cleo", "twitter": "CleoPaws", "age": 7}, keys=("name", "twitter") ) == expected ) assert ( utils.hash_record({"name": "Cleo", "twitter": "CleoPaws", "age": 7}) != expected ) def test_maximize_csv_field_size_limit(): # Reset to default in case other tests have changed it csv.field_size_limit(utils.ORIGINAL_CSV_FIELD_SIZE_LIMIT) long_value = "a" * 131073 long_csv = "id,text\n1,{}".format(long_value) fp = io.BytesIO(long_csv.encode("utf-8")) # Using rows_from_file should error with pytest.raises(csv.Error): rows, _ = utils.rows_from_file(fp, utils.Format.CSV) list(rows) # But if we call maximize_csv_field_size_limit() first it should be OK: utils.maximize_csv_field_size_limit() fp2 = io.BytesIO(long_csv.encode("utf-8")) rows2, _ = utils.rows_from_file(fp2, utils.Format.CSV) rows_list2 = list(rows2) assert len(rows_list2) == 1 assert rows_list2[0]["id"] == "1" assert rows_list2[0]["text"] == long_value @pytest.mark.parametrize( "input,expected", ( ({"foo": {"bar": 1}}, {"foo_bar": 1}), ({"foo": {"bar": [1, 2, {"baz": 3}]}}, {"foo_bar": [1, 2, {"baz": 3}]}), ({"foo": {"bar": 1, "baz": {"three": 3}}}, {"foo_bar": 1, "foo_baz_three": 3}), ), ) def test_flatten(input, expected): assert utils.flatten(input) == expected sqlite-utils-3.35.2/tests/test_wal.py000066400000000000000000000013021452131415600175770ustar00rootroot00000000000000import pytest from sqlite_utils import Database @pytest.fixture def db_path_tmpdir(tmpdir): path = tmpdir / "test.db" db = Database(str(path)) return db, path, tmpdir def test_enable_disable_wal(db_path_tmpdir): db, path, tmpdir = db_path_tmpdir assert len(tmpdir.listdir()) == 1 assert "delete" == db.journal_mode assert "test.db-wal" not in [f.basename for f in tmpdir.listdir()] db.enable_wal() assert "wal" == db.journal_mode db["test"].insert({"foo": "bar"}) assert "test.db-wal" in [f.basename for f in tmpdir.listdir()] db.disable_wal() assert "delete" == db.journal_mode assert "test.db-wal" not in [f.basename for f in tmpdir.listdir()]