pax_global_header00006660000000000000000000000064140043363670014520gustar00rootroot0000000000000052 comment=4d25d82c8faa5c4f143bd163c30474bd5dd6f209 python-rocksdb-0.8.0~rc3/000077500000000000000000000000001400433636700153015ustar00rootroot00000000000000python-rocksdb-0.8.0~rc3/.github/000077500000000000000000000000001400433636700166415ustar00rootroot00000000000000python-rocksdb-0.8.0~rc3/.github/workflows/000077500000000000000000000000001400433636700206765ustar00rootroot00000000000000python-rocksdb-0.8.0~rc3/.github/workflows/build.yml000066400000000000000000000045601400433636700225250ustar00rootroot00000000000000name: "Build" on: ['push', 'pull_request'] jobs: build_wheels: name: "Build wheels" runs-on: 'ubuntu-latest' steps: - uses: actions/checkout@v2 name: "Checkout source repository" - uses: actions/setup-python@v2 name: "Set up Python 3.9" with: python-version: '3.9' - name: "Install cibuildwheel" run: | python3 -m pip install cibuildwheel==1.7.1 - name: "Build wheels" run: | python3 -m cibuildwheel --output-dir dist env: ROCKSDB_VERSION: '6.14.6' CIBW_MANYLINUX_X86_64_IMAGE: 'manylinux2014' CIBW_BUILD: 'cp37-manylinux* cp38-manylinux* cp39-manylinux*' CIBW_SKIP: '*-manylinux_i686' CIBW_TEST_REQUIRES: '.[test]' CIBW_TEST_COMMAND: 'rm {project}/rocksdb/tests/__init__.py; pytest {project}/rocksdb/tests' CIBW_BEFORE_BUILD: | yum install -y bzip2-devel lz4-devel snappy-devel zlib-devel pushd /opt git clone https://github.com/facebook/rocksdb cd rocksdb git reset --hard $ROCKSDB_VERSION CXXFLAGS='-flto -Os -s' PORTABLE=1 make shared_lib -j 4 make install-shared popd - uses: actions/upload-artifact@v2 name: "Upload build artifacts" with: path: 'dist/*.whl' build_sdist: name: "Build source distribution" runs-on: 'ubuntu-latest' steps: - uses: actions/checkout@v2 name: "Checkout source repository" - uses: actions/setup-python@v2 name: "Set up Python 3.9" with: python-version: '3.9' - name: "Build sdist" run: | python3 setup.py sdist - uses: actions/upload-artifact@v2 name: "Upload build artifacts" with: path: 'dist/*.tar.gz' upload_pypi: name: "Upload packages" needs: ['build_wheels', 'build_sdist'] runs-on: 'ubuntu-latest' if: github.event_name == 'push' && startsWith(github.event.ref, 'refs/tags/v') steps: - uses: actions/download-artifact@v2 name: "Download artifacts" with: name: 'artifact' path: 'dist' - uses: pypa/gh-action-pypi-publish@master name: "Publish built packages" with: user: '__token__' password: "${{ secrets.PYPI_API_TOKEN }}" python-rocksdb-0.8.0~rc3/.gitignore000066400000000000000000000001461400433636700172720ustar00rootroot00000000000000build docs/_build .pytest_cache .eggs/ .tox/ *.egg-info/ *.pyc *.so __pycache__ rocksdb/_rocksdb.cpp python-rocksdb-0.8.0~rc3/.travis.yml000066400000000000000000000005311400433636700174110ustar00rootroot00000000000000sudo: required dist: trusty language: generic services: - docker cache: directories: - ~/.cache/pip install: docker build . 
-t ci-image; script: docker run -v ~/.cache/pip:/home/tester/.cache/pip -v $(pwd):/home/tester/src ci-image:latest tox -e ${TOXENV} ; env: - TOXENV=py27 - TOXENV=py36 - TOXENV=docs python-rocksdb-0.8.0~rc3/Dockerfile000066400000000000000000000013401400433636700172710ustar00rootroot00000000000000FROM ubuntu:18.04 ENV SRC /home/tester/src ENV DEBIAN_FRONTEND noninteractive RUN apt-get update -y && apt-get install -qy \ locales \ git \ wget \ python \ python3 \ python-dev \ python3-dev \ python-pip \ librocksdb-dev \ libsnappy-dev \ zlib1g-dev \ libbz2-dev \ liblz4-dev \ && rm -rf /var/lib/apt/lists/* #NOTE(sileht): really no utf-8 in 2017 !? ENV LANG en_US.UTF-8 RUN update-locale RUN locale-gen $LANG #NOTE(sileht): Upgrade python dev tools RUN pip install -U pip tox virtualenv RUN groupadd --gid 2000 tester RUN useradd --uid 2000 --gid 2000 --create-home --shell /bin/bash tester USER tester WORKDIR $SRC python-rocksdb-0.8.0~rc3/LICENSE000066400000000000000000000027151400433636700163130ustar00rootroot00000000000000Copyright (c) 2014, Stephan Hofmockel All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the author nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. python-rocksdb-0.8.0~rc3/MANIFEST.in000066400000000000000000000001321400433636700170330ustar00rootroot00000000000000include rocksdb/cpp/*.hpp recursive-include rocksdb *.pxd recursive-include rocksdb *.pyx python-rocksdb-0.8.0~rc3/README.rst000066400000000000000000000020221400433636700167640ustar00rootroot00000000000000python-rocksdb ============== Python bindings for RocksDB. See https://rocksdb-tina.readthedocs.io/ for a more comprehensive install and usage description. Quick install ------------- .. code-block:: bash $ pip install rocksdb Quick usage guide ----------------- .. code-block:: python >>> import rocksdb >>> db = rocksdb.DB('test.db', rocksdb.Options(create_if_missing=True)) >>> db.put(b'a', b'data') >>> print(db.get(b'a')) b'data' Acknowledgements ---------------- This project attempts to collect the efforts put into different forks of the `pyrocksdb`_ project that was originally written by `stephan-hof`_, as sadly none seems to be actively maintained. 
In particular, the `python-rocksdb`_ fork created by `twmht`_, but it also incorporates changes from other forks and unfinished pull requests. .. _python-rocksdb: https://github.com/twmht/python-rocksdb .. _twmht: https://github.com/twmht .. _pyrocksdb: https://github.com/stephan-hof/pyrocksdb .. _stephan-hof: https://github.com/stephan-hof python-rocksdb-0.8.0~rc3/docs/000077500000000000000000000000001400433636700162315ustar00rootroot00000000000000python-rocksdb-0.8.0~rc3/docs/Makefile000066400000000000000000000151561400433636700177010ustar00rootroot00000000000000# Makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build PAPER = BUILDDIR = _build # User-friendly check for sphinx-build ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) $(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) endif # Internal variables. PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . # the i18n builder cannot share the environment and doctrees with the others I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext help: @echo "Please use \`make ' where is one of" @echo " html to make standalone HTML files" @echo " dirhtml to make HTML files named index.html in directories" @echo " singlehtml to make a single large HTML file" @echo " pickle to make pickle files" @echo " json to make JSON files" @echo " htmlhelp to make HTML files and a HTML help project" @echo " qthelp to make HTML files and a qthelp project" @echo " devhelp to make HTML files and a Devhelp project" @echo " epub to make an epub" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @echo " latexpdf to make LaTeX files and run them through pdflatex" @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" @echo " text to make text files" @echo " man to make manual pages" @echo " texinfo to make Texinfo files" @echo " info to make Texinfo files and run them through makeinfo" @echo " gettext to make PO message catalogs" @echo " changes to make an overview of all changed/added/deprecated items" @echo " xml to make Docutils-native XML files" @echo " pseudoxml to make pseudoxml-XML files for display purposes" @echo " linkcheck to check all external links for integrity" @echo " doctest to run all doctests embedded in the documentation (if enabled)" clean: rm -rf $(BUILDDIR)/* html: $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." dirhtml: $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." singlehtml: $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml @echo @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." pickle: $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle @echo @echo "Build finished; now you can process the pickle files." 
json: $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json @echo @echo "Build finished; now you can process the JSON files." htmlhelp: $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp @echo @echo "Build finished; now you can run HTML Help Workshop with the" \ ".hhp project file in $(BUILDDIR)/htmlhelp." qthelp: $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp @echo @echo "Build finished; now you can run "qcollectiongenerator" with the" \ ".qhcp project file in $(BUILDDIR)/qthelp, like this:" @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/rocksdb.qhcp" @echo "To view the help file:" @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/rocksdb.qhc" devhelp: $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp @echo @echo "Build finished." @echo "To view the help file:" @echo "# mkdir -p $$HOME/.local/share/devhelp/rocksdb" @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/rocksdb" @echo "# devhelp" epub: $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub @echo @echo "Build finished. The epub file is in $(BUILDDIR)/epub." latex: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." @echo "Run \`make' in that directory to run these through (pdf)latex" \ "(use \`make latexpdf' here to do that automatically)." latexpdf: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through pdflatex..." $(MAKE) -C $(BUILDDIR)/latex all-pdf @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." latexpdfja: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through platex and dvipdfmx..." $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." text: $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text @echo @echo "Build finished. The text files are in $(BUILDDIR)/text." man: $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man @echo @echo "Build finished. The manual pages are in $(BUILDDIR)/man." texinfo: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." @echo "Run \`make' in that directory to run these through makeinfo" \ "(use \`make info' here to do that automatically)." info: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo "Running Texinfo files through makeinfo..." make -C $(BUILDDIR)/texinfo info @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." gettext: $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale @echo @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." changes: $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes @echo @echo "The overview file is in $(BUILDDIR)/changes." linkcheck: $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in $(BUILDDIR)/linkcheck/output.txt." doctest: $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest @echo "Testing of doctests in the sources finished, look at the " \ "results in $(BUILDDIR)/doctest/output.txt." xml: $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml @echo @echo "Build finished. The XML files are in $(BUILDDIR)/xml." pseudoxml: $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml @echo @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." 
python-rocksdb-0.8.0~rc3/docs/_static/000077500000000000000000000000001400433636700176575ustar00rootroot00000000000000python-rocksdb-0.8.0~rc3/docs/_static/.empty000066400000000000000000000000001400433636700210040ustar00rootroot00000000000000python-rocksdb-0.8.0~rc3/docs/api/000077500000000000000000000000001400433636700170025ustar00rootroot00000000000000python-rocksdb-0.8.0~rc3/docs/api/backup.rst000066400000000000000000000040151400433636700210010ustar00rootroot00000000000000Backup and Restore ****************** BackupEngine ============ .. py:class:: rocksdb.BackupEngine .. py:method:: __init__(backup_dir) Creates a object to manage backup of a single database. :param unicode backup_dir: Where to keep the backup files. Has to be different than db.db_name. For example db.db_name + '/backups'. .. py:method:: create_backup(db, flush_before_backup=False) Triggers the creation of a backup. :param db: Database object to backup. :type db: :py:class:`rocksdb.DB` :param bool flush_before_backup: If ``True`` the current memtable is flushed. .. py:method:: restore_backup(backup_id, db_dir, wal_dir) Restores the backup from the given id. :param int backup_id: id of the backup to restore. :param unicode db_dir: Target directory to restore backup. :param unicode wal_dir: Target directory to restore backuped WAL files. .. py:method:: restore_latest_backup(db_dir, wal_dir) Restores the latest backup. :param unicode db_dir: see :py:meth:`restore_backup` :param unicode wal_dir: see :py:meth:`restore_backup` .. py:method:: stop_backup() Can be called from another thread to stop the current backup process. .. py:method:: purge_old_backups(num_backups_to_keep) Deletes all backups (oldest first) until "num_backups_to_keep" are left. :param int num_backups_to_keep: Number of backupfiles to keep. .. py:method:: delete_backup(backup_id) :param int backup_id: Delete the backup with the given id. .. py:method:: get_backup_info() Returns information about all backups. It returns a list of dict's where each dict as the following keys. ``backup_id`` (int): id of this backup. ``timestamp`` (int): Seconds since epoch, when the backup was created. ``size`` (int): Size in bytes of the backup. python-rocksdb-0.8.0~rc3/docs/api/database.rst000066400000000000000000000403621400433636700213050ustar00rootroot00000000000000Database interactions ********************* Database object =============== .. py:class:: rocksdb.DB .. py:method:: __init__(db_name, Options opts, read_only=False) :param unicode db_name: Name of the database to open :param opts: Options for this specific database :type opts: :py:class:`rocksdb.Options` :param bool read_only: If ``True`` the database is opened read-only. All DB calls which modify data will raise an Exception. .. py:method:: put(key, value, sync=False, disable_wal=False) Set the database entry for "key" to "value". :param bytes key: Name for this entry :param bytes value: Data for this entry :param bool sync: If ``True``, the write will be flushed from the operating system buffer cache (by calling WritableFile::Sync()) before the write is considered complete. If this flag is true, writes will be slower. If this flag is ``False``, and the machine crashes, some recent writes may be lost. Note that if it is just the process that crashes (i.e., the machine does not reboot), no writes will be lost even if ``sync == False``. In other words, a DB write with ``sync == False`` has similar crash semantics as the "write()" system call. 
A DB write with ``sync == True`` has similar crash semantics to a "write()" system call followed by "fdatasync()".

      :param bool disable_wal: If ``True``, writes will not first go to the
          write ahead log, and the write may get lost after a crash.

   .. py:method:: delete(key, sync=False, disable_wal=False)

      Remove the database entry for "key".

      :param bytes key: Name to delete
      :param sync: See :py:meth:`rocksdb.DB.put`
      :param disable_wal: See :py:meth:`rocksdb.DB.put`

      :raises rocksdb.errors.NotFound: If the key does not exist

   .. py:method:: merge(key, value, sync=False, disable_wal=False)

      Merge the database entry for "key" with "value". The semantics of this
      operation are determined by the user-provided merge_operator when
      opening the DB.

      See :py:meth:`rocksdb.DB.put` for the parameters.

      :raises: :py:exc:`rocksdb.errors.NotSupported` if this is called and no
          :py:attr:`rocksdb.Options.merge_operator` was set at creation

   .. py:method:: write(batch, sync=False, disable_wal=False)

      Apply the specified updates to the database.

      :param rocksdb.WriteBatch batch: Batch to apply
      :param sync: See :py:meth:`rocksdb.DB.put`
      :param disable_wal: See :py:meth:`rocksdb.DB.put`

   .. py:method:: get(key, verify_checksums=False, fill_cache=True, snapshot=None, read_tier="all")

      :param bytes key: Name to get

      :param bool verify_checksums: If ``True``, all data read from underlying
          storage will be verified against corresponding checksums.

      :param bool fill_cache: Should the "data block", "index block" or
          "filter block" read for this iteration be cached in memory?
          Callers may wish to set this field to ``False`` for bulk scans.

      :param snapshot: If not ``None``, read as of the supplied snapshot
          (which must belong to the DB that is being read and which must not
          have been released). If it is ``None``, an implicit snapshot of the
          state at the beginning of this read operation is used.
      :type snapshot: :py:class:`rocksdb.Snapshot`

      :param string read_tier: Specify if this read request should process
          data that ALREADY resides on a particular cache. If the required
          data is not found at the specified cache, then
          :py:exc:`rocksdb.errors.Incomplete` is raised.

          | Use ``all`` if a fetch from disk is allowed.
          | Use ``cache`` if only data from cache is allowed.

      :returns: ``None`` if not found, else the value for this key

   .. py:method:: multi_get(keys, verify_checksums=False, fill_cache=True, snapshot=None, read_tier="all")

      :param keys: Keys to fetch
      :type keys: list of bytes

      For the other params see :py:meth:`rocksdb.DB.get`

      :returns: A ``dict`` where the value is either ``bytes`` or ``None`` if not found

      :raises: If the fetch for a single key fails

      .. note::
          keys will not be "de-duplicated".
          Duplicate keys will return duplicate values in order.

   .. py:method:: key_may_exist(key, fetch=False, verify_checksums=False, fill_cache=True, snapshot=None, read_tier="all")

      If the key definitely does not exist in the database, then this method
      returns ``False``, else ``True``. If the caller wants to obtain the
      value when the key is found in memory, fetch should be set to ``True``.
      This check is potentially lighter-weight than invoking DB::get().
      One way to make this lighter weight is to avoid doing any IOs.

      :param bytes key: Key to check
      :param bool fetch: Also obtain the value if found

      For the other params see :py:meth:`rocksdb.DB.get`

      :returns:
          * ``(True, None)`` if key is found but value not in memory
          * ``(True, None)`` if key is found and ``fetch=False``
          * ``(True, <value>)`` if key is found and value in memory and ``fetch=True``
          * ``(False, None)`` if key is not found
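
      A minimal sketch of the read methods above, assuming a database opened
      as in the quick usage guide; the database name and keys are purely
      illustrative::

          import rocksdb

          db = rocksdb.DB('test.db', rocksdb.Options(create_if_missing=True))
          db.put(b'a', b'1')

          db.get(b'a')                  # b'1'
          db.get(b'missing')            # None, the key does not exist
          db.multi_get([b'a', b'b'])    # {b'a': b'1', b'b': None}

          # (True, b'1') while the value is still in the memtable,
          # otherwise possibly (True, None)
          db.key_may_exist(b'a', fetch=True)

   ..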
py:method:: iterkeys(verify_checksums=False, fill_cache=True, snapshot=None, read_tier="all") Iterate over the keys For other params see :py:meth:`rocksdb.DB.get` :returns: A iterator object which is not valid yet. Call first one of the seek methods of the iterator to position it :rtype: :py:class:`rocksdb.BaseIterator` .. py:method:: itervalues(verify_checksums=False, fill_cache=True, snapshot=None, read_tier="all") Iterate over the values For other params see :py:meth:`rocksdb.DB.get` :returns: A iterator object which is not valid yet. Call first one of the seek methods of the iterator to position it :rtype: :py:class:`rocksdb.BaseIterator` .. py:method:: iteritems(verify_checksums=False, fill_cache=True, snapshot=None, read_tier="all") Iterate over the items For other params see :py:meth:`rocksdb.DB.get` :returns: A iterator object which is not valid yet. Call first one of the seek methods of the iterator to position it :rtype: :py:class:`rocksdb.BaseIterator` .. py:method:: snapshot() Return a handle to the current DB state. Iterators created with this handle will all observe a stable snapshot of the current DB state. :rtype: :py:class:`rocksdb.Snapshot` .. py:method:: get_property(prop) DB implementations can export properties about their state via this method. If "property" is a valid property understood by this DB implementation, a byte string with its value is returned. Otherwise ``None`` Valid property names include: * ``b"rocksdb.num-files-at-level"``: return the number of files at level , where is an ASCII representation of a level number (e.g. "0"). * ``b"rocksdb.stats"``: returns a multi-line byte string that describes statistics about the internal operation of the DB. * ``b"rocksdb.sstables"``: returns a multi-line byte string that describes all of the sstables that make up the db contents. * ``b"rocksdb.num-immutable-mem-table"``: Number of immutable mem tables. * ``b"rocksdb.mem-table-flush-pending"``: Returns ``1`` if mem table flush is pending, otherwise ``0``. * ``b"rocksdb.compaction-pending"``: Returns ``1`` if a compaction is pending, otherweise ``0``. * ``b"rocksdb.background-errors"``: Returns accumulated background errors encountered. * ``b"rocksdb.cur-size-active-mem-table"``: Returns current size of the active memtable. .. py:method:: get_live_files_metadata() Returns a list of all table files. It returns a list of dict's were each dict has the following keys. ``name`` Name of the file ``level`` Level at which this file resides ``size`` File size in bytes ``smallestkey`` Smallest user defined key in the file ``largestkey`` Largest user defined key in the file ``smallest_seqno`` smallest seqno in file ``largest_seqno`` largest seqno in file .. py:method:: compact_range(begin=None, end=None, ** options) Compact the underlying storage for the key range [begin,end]. The actual compaction interval might be superset of [begin, end]. In particular, deleted and overwritten versions are discarded, and the data is rearranged to reduce the cost of operations needed to access the data. This operation should typically only be invoked by users who understand the underlying implementation. ``begin == None`` is treated as a key before all keys in the database. ``end == None`` is treated as a key after all keys in the database. Therefore the following call will compact the entire database: ``db.compact_range()``. Note that after the entire database is compacted, all data are pushed down to the last level containing any data. 
If the total data size after compaction is reduced, that level might not be appropriate for hosting all the files. In this case, client could set change_level to ``True``, to move the files back to the minimum level capable of holding the data set or a given level (specified by non-negative target_level). :param bytes begin: Key where to start compaction. If ``None`` start at the beginning of the database. :param bytes end: Key where to end compaction. If ``None`` end at the last key of the database. :param bool change_level: If ``True``, compacted files will be moved to the minimum level capable of holding the data or given level (specified by non-negative target_level). If ``False`` you may end with a bigger level than configured. Default is ``False``. :param int target_level: If change_level is true and target_level have non-negative value, compacted files will be moved to target_level. Default is ``-1``. :param string bottommost_level_compaction: For level based compaction, we can configure if we want to skip/force bottommost level compaction. By default level based compaction will only compact the bottommost level if there is a compaction filter. It can be set to the following values. ``skip`` Skip bottommost level compaction ``if_compaction_filter`` Only compact bottommost level if there is a compaction filter. This is the default. ``force`` Always compact bottommost level .. py:attribute:: options Returns the associated :py:class:`rocksdb.Options` instance. .. note:: Changes to this object have no effect anymore. Consider this as read-only Iterator ======== .. py:class:: rocksdb.BaseIterator Base class for all iterators in this module. After creation a iterator is invalid. Call one of the seek methods first before starting iteration .. py:method:: seek_to_first() Position at the first key in the source .. py:method:: seek_to_last() Position at the last key in the source .. py:method:: seek(key) :param bytes key: Position at the first key in the source that at or past Methods to support the python iterator protocol .. py:method:: __iter__() .. py:method:: __next__() .. py:method:: __reversed__() Snapshot ======== .. py:class:: rocksdb.Snapshot Opaque handler for a single Snapshot. Snapshot is released if nobody holds a reference on it. Retrieved via :py:meth:`rocksdb.DB.snapshot` WriteBatch ========== .. py:class:: rocksdb.WriteBatch WriteBatch holds a collection of updates to apply atomically to a DB. The updates are applied in the order in which they are added to the WriteBatch. For example, the value of "key" will be "v3" after the following batch is written:: batch = rocksdb.WriteBatch() batch.put(b"key", b"v1") batch.delete(b"key") batch.put(b"key", b"v2") batch.put(b"key", b"v3") .. py:method:: __init__(data=None) Creates a WriteBatch. :param bytes data: A serialized version of a previous WriteBatch. As retrieved from a previous .data() call. If ``None`` a empty WriteBatch is generated .. py:method:: put(key, value) Store the mapping "key->value" in the database. :param bytes key: Name of the entry to store :param bytes value: Data of this entry .. py:method:: merge(key, value) Merge "value" with the existing value of "key" in the database. :param bytes key: Name of the entry to merge :param bytes value: Data to merge .. py:method:: delete(key) If the database contains a mapping for "key", erase it. Else do nothing. :param bytes key: Key to erase .. py:method:: clear() Clear all updates buffered in this batch. .. 
note:: Don't call this method if there is an outstanding iterator. Calling :py:meth:`rocksdb.WriteBatch.clear()` with outstanding iterator, leads to SEGFAULT. .. py:method:: data() Retrieve the serialized version of this batch. :rtype: ``bytes`` .. py:method:: count() Returns the number of updates in the batch :rtype: int .. py:method:: __iter__() Returns an iterator over the current contents of the write batch. If you add new items to the batch, they are not visible for this iterator. Create a new one if you need to see them. .. note:: Calling :py:meth:`rocksdb.WriteBatch.clear()` on the write batch invalidates the iterator. Using a iterator where its corresponding write batch has been cleared, leads to SEGFAULT. :rtype: :py:class:`rocksdb.WriteBatchIterator` WriteBatchIterator ================== .. py:class:: rocksdb.WriteBatchIterator .. py:method:: __iter__() Returns self. .. py:method:: __next__() Returns the next item inside the corresponding write batch. The return value is a tuple of always size three. First item (Name of the operation): * ``"Put"`` * ``"Merge"`` * ``"Delete"`` Second item (key): Key for this operation. Third item (value): The value for this operation. Empty for ``"Delete"``. Repair DB ========= .. py:function:: repair_db(db_name, opts) :param unicode db_name: Name of the database to open :param opts: Options for this specific database :type opts: :py:class:`rocksdb.Options` If a DB cannot be opened, you may attempt to call this method to resurrect as much of the contents of the database as possible. Some data may be lost, so be careful when calling this function on a database that contains important information. Errors ====== .. py:exception:: rocksdb.errors.NotFound .. py:exception:: rocksdb.errors.Corruption .. py:exception:: rocksdb.errors.NotSupported .. py:exception:: rocksdb.errors.InvalidArgument .. py:exception:: rocksdb.errors.RocksIOError .. py:exception:: rocksdb.errors.MergeInProgress .. py:exception:: rocksdb.errors.Incomplete python-rocksdb-0.8.0~rc3/docs/api/index.rst000066400000000000000000000002721400433636700206440ustar00rootroot00000000000000Python driver for RocksDB ========================= .. py:module:: rocksdb .. toctree:: Options Database Interfaces Backup python-rocksdb-0.8.0~rc3/docs/api/interfaces.rst000066400000000000000000000225321400433636700216630ustar00rootroot00000000000000Interfaces ********** Comparator ========== .. py:class:: rocksdb.interfaces.Comparator A Comparator object provides a total order across slices that are used as keys in an sstable or a database. A Comparator implementation must be thread-safe since rocksdb may invoke its methods concurrently from multiple threads. .. py:method:: compare(a, b) Three-way comparison. :param bytes a: First field to compare :param bytes b: Second field to compare :returns: * -1 if a < b * 0 if a == b * 1 if a > b :rtype: ``int`` .. py:method:: name() The name of the comparator. Used to check for comparator mismatches (i.e., a DB created with one comparator is accessed using a different comparator). The client of this package should switch to a new name whenever the comparator implementation changes in a way that will cause the relative ordering of any two keys to change. Names starting with "rocksdb." are reserved and should not be used by any clients of this package. :rtype: ``bytes`` Merge Operator ============== Essentially, a MergeOperator specifies the SEMANTICS of a merge, which only client knows. 
It could be numeric addition, list append, string concatenation, edit data structure, whatever. The library, on the other hand, is concerned with the exercise of this interface, at the right time (during get, iteration, compaction...) To use merge, the client needs to provide an object implementing one of the following interfaces: * AssociativeMergeOperator - for most simple semantics (always take two values, and merge them into one value, which is then put back into rocksdb). numeric addition and string concatenation are examples. * MergeOperator - the generic class for all the more complex operations. One method (FullMerge) to merge a Put/Delete value with a merge operand. Another method (PartialMerge) that merges two operands together. This is especially useful if your key values have a complex structure but you would still like to support client-specific incremental updates. AssociativeMergeOperator is simpler to implement. MergeOperator is simply more powerful. See this page for more details https://github.com/facebook/rocksdb/wiki/Merge-Operator AssociativeMergeOperator ------------------------ .. py:class:: rocksdb.interfaces.AssociativeMergeOperator .. py:method:: merge(key, existing_value, value) Gives the client a way to express the read -> modify -> write semantics :param bytes key: The key that's associated with this merge operation :param bytes existing_value: The current value in the db. ``None`` indicates the key does not exist before this op :param bytes value: The value to update/merge the existing_value with :returns: ``True`` and the new value on success. All values passed in will be client-specific values. So if this method returns false, it is because client specified bad data or there was internal corruption. The client should assume that this will be treated as an error by the library. :rtype: ``(bool, bytes)`` .. py:method:: name() The name of the MergeOperator. Used to check for MergeOperator mismatches. For example a DB created with one MergeOperator is accessed using a different MergeOperator. :rtype: ``bytes`` MergeOperator ------------- .. py:class:: rocksdb.interfaces.MergeOperator .. py:method:: full_merge(key, existing_value, operand_list) Gives the client a way to express the read -> modify -> write semantics :param bytes key: The key that's associated with this merge operation. Client could multiplex the merge operator based on it if the key space is partitioned and different subspaces refer to different types of data which have different merge operation semantics :param bytes existing_value: The current value in the db. ``None`` indicates the key does not exist before this op :param operand_list: The sequence of merge operations to apply. :type operand_list: list of bytes :returns: ``True`` and the new value on success. All values passed in will be client-specific values. So if this method returns false, it is because client specified bad data or there was internal corruption. The client should assume that this will be treated as an error by the library. :rtype: ``(bool, bytes)`` .. py:method:: partial_merge(key, left_operand, right_operand) This function performs merge(left_op, right_op) when both the operands are themselves merge operation types that you would have passed to a DB::Merge() call in the same order. For example DB::Merge(key,left_op), followed by DB::Merge(key,right_op)). 
PartialMerge should combine them into a single merge operation that is returned together with ``True``. This new value should be constructed such that a call to DB::Merge(key, new_value) would yield the same result as a call to DB::Merge(key, left_op) followed by DB::Merge(key, right_op).

      If it is impossible or infeasible to combine the two operations,
      return ``(False, None)``. The library will internally keep track of the
      operations, and apply them in the correct order once a base-value
      (a Put/Delete/End-of-Database) is seen.

      :param bytes key: The key that is associated with this merge operation.
      :param bytes left_operand: First operand to merge
      :param bytes right_operand: Second operand to merge

      :rtype: ``(bool, bytes)``

      .. note::
          Presently there is no way to differentiate between error/corruption
          and simply "return false". For now, the client should simply return
          false in any case it cannot perform partial-merge, regardless of
          reason. If there is corruption in the data, handle it in the
          FullMerge() function, and return false there.

   .. py:method:: name()

      The name of the MergeOperator. Used to check for MergeOperator
      mismatches. For example, a DB created with one MergeOperator is
      accessed using a different MergeOperator.

      :rtype: ``bytes``

FilterPolicy
============

.. py:class:: rocksdb.interfaces.FilterPolicy

   .. py:method:: create_filter(keys)

      Create a bytestring which can act as a filter for keys.

      :param keys: List of keys (potentially with duplicates) that are
          ordered according to the user supplied comparator.
      :type keys: list of bytes

      :returns: A filter that summarizes keys
      :rtype: ``bytes``

   .. py:method:: key_may_match(key, filter)

      Check if the key may be in the filter.

      :param bytes key: Key for a single entry inside the database
      :param bytes filter: Contains the data returned by a preceding call to
          create_filter on this class

      :returns: This method must return ``True`` if the key was in the list
          of keys passed to create_filter(). This method may return ``True``
          or ``False`` if the key was not on the list, but it should aim to
          return ``False`` with a high probability.
      :rtype: ``bool``

   .. py:method:: name()

      Return the name of this policy. Note that if the filter encoding
      changes in an incompatible way, the name returned by this method must
      be changed. Otherwise, old incompatible filters may be passed to
      methods of this type.

      :rtype: ``bytes``

SliceTransform
==============

.. py:class:: rocksdb.interfaces.SliceTransform

   SliceTransform is currently used to implement the 'prefix-API' of rocksdb.
   https://github.com/facebook/rocksdb/wiki/Proposal-for-prefix-API

   .. py:method:: transform(src)

      :param bytes src: Full key to extract the prefix from.

      :returns: A tuple of two integers ``(offset, size)``, where the first
          integer is the offset within ``src`` and the second is the size of
          the prefix after the offset. This means the prefix is generated by
          ``src[offset:offset+size]``.
      :rtype: ``(int, int)``

   .. py:method:: in_domain(src)

      Decide if a prefix can be extracted from ``src``. :py:meth:`transform`
      will only be called if this method returns ``True``.

      :param bytes src: Full key to check.
      :rtype: ``bool``

   .. py:method:: in_range(prefix)

      Check if ``prefix`` is a valid prefix.

      :param bytes prefix: Prefix to check.
      :returns: ``True`` if ``prefix`` is a valid prefix.
      :rtype: ``bool``

   .. py:method:: name()

      Return the name of this transformation.
:rtype: ``bytes`` python-rocksdb-0.8.0~rc3/docs/api/options.rst000066400000000000000000001074451400433636700212420ustar00rootroot00000000000000Options creation **************** Options object ============== .. py:class:: rocksdb.Options .. IMPORTANT:: The default values mentioned here, describe the values of the C++ library only. This wrapper does not set any default value itself. So as soon as the rocksdb developers change a default value this document could be outdated. So if you really depend on a default value, double check it with the according version of the C++ library. | Most recent default values should be here | https://github.com/facebook/rocksdb/blob/master/include/rocksdb/options.h | https://github.com/facebook/rocksdb/blob/master/util/options.cc .. py:method:: __init__(**kwargs) All options mentioned below can also be passed as keyword-arguments in the constructor. For example:: import rocksdb opts = rocksdb.Options(create_if_missing=True) # is the same as opts = rocksdb.Options() opts.create_if_missing = True .. py:attribute:: create_if_missing If ``True``, the database will be created if it is missing. | *Type:* ``bool`` | *Default:* ``False`` .. py:attribute:: error_if_exists If ``True``, an error is raised if the database already exists. | *Type:* ``bool`` | *Default:* ``False`` .. py:attribute:: paranoid_checks If ``True``, the implementation will do aggressive checking of the data it is processing and will stop early if it detects any errors. This may have unforeseen ramifications: for example, a corruption of one DB entry may cause a large number of entries to become unreadable or for the entire DB to become unopenable. If any of the writes to the database fails (Put, Delete, Merge, Write), the database will switch to read-only mode and fail all other Write operations. | *Type:* ``bool`` | *Default:* ``True`` .. py:attribute:: write_buffer_size Amount of data to build up in memory (backed by an unsorted log on disk) before converting to a sorted on-disk file. Larger values increase performance, especially during bulk loads. Up to max_write_buffer_number write buffers may be held in memory at the same time, so you may wish to adjust this parameter to control memory usage. Also, a larger write buffer will result in a longer recovery time the next time the database is opened. | *Type:* ``int`` | *Default:* ``4194304`` .. py:attribute:: max_write_buffer_number The maximum number of write buffers that are built up in memory. The default is 2, so that when 1 write buffer is being flushed to storage, new writes can continue to the other write buffer. | *Type:* ``int`` | *Default:* ``2`` .. py:attribute:: min_write_buffer_number_to_merge The minimum number of write buffers that will be merged together before writing to storage. If set to 1, then all write buffers are fushed to L0 as individual files and this increases read amplification because a get request has to check in all of these files. Also, an in-memory merge may result in writing lesser data to storage if there are duplicate records in each of these individual write buffers. | *Type:* ``int`` | *Default:* ``1`` .. py:attribute:: max_open_files Number of open files that can be used by the DB. You may need to increase this if your database has a large working set. Value -1 means files opened are always kept open. You can estimate number of files based on target_file_size_base and target_file_size_multiplier for level-based compaction. For universal-style compaction, you can usually set it to -1. 
| *Type:* ``int`` | *Default:* ``5000`` .. py:attribute:: compression Compress blocks using the specified compression algorithm. This parameter can be changed dynamically. | *Type:* Member of :py:class:`rocksdb.CompressionType` | *Default:* :py:attr:`rocksdb.CompressionType.snappy_compression` .. py:attribute:: num_levels Number of levels for this database | *Type:* ``int`` | *Default:* ``7`` .. py:attribute:: level0_file_num_compaction_trigger Number of files to trigger level-0 compaction. A value <0 means that level-0 compaction will not be triggered by number of files at all. | *Type:* ``int`` | *Default:* ``4`` .. py:attribute:: level0_slowdown_writes_trigger Soft limit on number of level-0 files. We start slowing down writes at this point. A value <0 means that no writing slow down will be triggered by number of files in level-0. | *Type:* ``int`` | *Default:* ``20`` .. py:attribute:: level0_stop_writes_trigger Maximum number of level-0 files. We stop writes at this point. | *Type:* ``int`` | *Default:* ``24`` .. py:attribute:: max_mem_compaction_level Maximum level to which a new compacted memtable is pushed if it does not create overlap. We try to push to level 2 to avoid the relatively expensive level 0=>1 compactions and to avoid some expensive manifest file operations. We do not push all the way to the largest level since that can generate a lot of wasted disk space if the same key space is being repeatedly overwritten. | *Type:* ``int`` | *Default:* ``2`` .. py:attribute:: target_file_size_base | Target file size for compaction. | target_file_size_base is per-file size for level-1. | Target file size for level L can be calculated by | target_file_size_base * (target_file_size_multiplier ^ (L-1)). For example, if target_file_size_base is 2MB and target_file_size_multiplier is 10, then each file on level-1 will be 2MB, and each file on level 2 will be 20MB, and each file on level-3 will be 200MB. | *Type:* ``int`` | *Default:* ``2097152`` .. py:attribute:: target_file_size_multiplier | by default target_file_size_multiplier is 1, which means | by default files in different levels will have similar size. | *Type:* ``int`` | *Default:* ``1`` .. py:attribute:: max_bytes_for_level_base Control maximum total data size for a level. *max_bytes_for_level_base* is the max total for level-1. Maximum number of bytes for level L can be calculated as (*max_bytes_for_level_base*) * (*max_bytes_for_level_multiplier* ^ (L-1)) For example, if *max_bytes_for_level_base* is 20MB, and if *max_bytes_for_level_multiplier* is 10, total data size for level-1 will be 20MB, total file size for level-2 will be 200MB, and total file size for level-3 will be 2GB. | *Type:* ``int`` | *Default:* ``10485760`` .. py:attribute:: max_bytes_for_level_multiplier See :py:attr:`max_bytes_for_level_base` | *Type:* ``int`` | *Default:* ``10`` .. py:attribute:: max_bytes_for_level_multiplier_additional Different max-size multipliers for different levels. These are multiplied by max_bytes_for_level_multiplier to arrive at the max-size of each level. | *Type:* ``[int]`` | *Default:* ``[1, 1, 1, 1, 1, 1, 1]`` .. py:attribute:: max_compaction_bytes We try to limit number of bytes in one compaction to be lower than this threshold. But it's not guaranteed. Value 0 will be sanitized. | *Type:* ``int`` | *Default:* ``target_file_size_base * 25`` .. py:attribute:: use_fsync If true, then every store to stable storage will issue a fsync. If false, then every store to stable storage will issue a fdatasync. 
This parameter should be set to true while storing data to filesystem like ext3 that can lose files after a reboot. | *Type:* ``bool`` | *Default:* ``False`` .. py:attribute:: db_log_dir This specifies the info LOG dir. If it is empty, the log files will be in the same dir as data. If it is non empty, the log files will be in the specified dir, and the db data dir's absolute path will be used as the log file name's prefix. | *Type:* ``unicode`` | *Default:* ``""`` .. py:attribute:: wal_dir This specifies the absolute dir path for write-ahead logs (WAL). If it is empty, the log files will be in the same dir as data, dbname is used as the data dir by default. If it is non empty, the log files will be in kept the specified dir. When destroying the db, all log files in wal_dir and the dir itself is deleted | *Type:* ``unicode`` | *Default:* ``""`` .. py:attribute:: delete_obsolete_files_period_micros The periodicity when obsolete files get deleted. The default value is 6 hours. The files that get out of scope by compaction process will still get automatically delete on every compaction, regardless of this setting | *Type:* ``int`` | *Default:* ``21600000000`` .. py:attribute:: max_background_compactions Maximum number of concurrent background jobs, submitted to the default LOW priority thread pool | *Type:* ``int`` | *Default:* ``1`` .. py:attribute:: max_background_flushes Maximum number of concurrent background memtable flush jobs, submitted to the HIGH priority thread pool. By default, all background jobs (major compaction and memtable flush) go to the LOW priority pool. If this option is set to a positive number, memtable flush jobs will be submitted to the HIGH priority pool. It is important when the same Env is shared by multiple db instances. Without a separate pool, long running major compaction jobs could potentially block memtable flush jobs of other db instances, leading to unnecessary Put stalls. | *Type:* ``int`` | *Default:* ``1`` .. py:attribute:: max_log_file_size Specify the maximal size of the info log file. If the log file is larger than `max_log_file_size`, a new info log file will be created. If max_log_file_size == 0, all logs will be written to one log file. | *Type:* ``int`` | *Default:* ``0`` .. py:attribute:: log_file_time_to_roll Time for the info log file to roll (in seconds). If specified with non-zero value, log file will be rolled if it has been active longer than `log_file_time_to_roll`. A value of ``0`` means disabled. | *Type:* ``int`` | *Default:* ``0`` .. py:attribute:: keep_log_file_num Maximal info log files to be kept. | *Type:* ``int`` | *Default:* ``1000`` .. py:attribute:: soft_rate_limit Puts are delayed 0-1 ms when any level has a compaction score that exceeds soft_rate_limit. This is ignored when == 0.0. CONSTRAINT: soft_rate_limit <= hard_rate_limit. If this constraint does not hold, RocksDB will set soft_rate_limit = hard_rate_limit. A value of ``0`` means disabled. | *Type:* ``float`` | *Default:* ``0`` .. py:attribute:: hard_rate_limit Puts are delayed 1ms at a time when any level has a compaction score that exceeds hard_rate_limit. This is ignored when <= 1.0. A value fo ``0`` means disabled. | *Type:* ``float`` | *Default:* ``0`` .. py:attribute:: rate_limit_delay_max_milliseconds Max time a put will be stalled when hard_rate_limit is enforced. If 0, then there is no limit. | *Type:* ``int`` | *Default:* ``1000`` .. py:attribute:: max_manifest_file_size manifest file is rolled over on reaching this limit. The older manifest file be deleted. 
The default value is MAX_INT so that roll-over does not take place. | *Type:* ``int`` | *Default:* ``(2**64) - 1`` .. py:attribute:: table_cache_numshardbits Number of shards used for table cache. | *Type:* ``int`` | *Default:* ``4`` .. py:attribute:: arena_block_size size of one block in arena memory allocation. If <= 0, a proper value is automatically calculated (usually 1/10 of writer_buffer_size). | *Type:* ``int`` | *Default:* ``0`` .. py:attribute:: disable_auto_compactions Disable automatic compactions. Manual compactions can still be issued on this database. | *Type:* ``bool`` | *Default:* ``False`` .. py:attribute:: wal_ttl_seconds, wal_size_limit_mb The following two fields affect how archived logs will be deleted. 1. If both set to 0, logs will be deleted asap and will not get into the archive. 2. If wal_ttl_seconds is 0 and wal_size_limit_mb is not 0, WAL files will be checked every 10 min and if total size is greater then wal_size_limit_mb, they will be deleted starting with the earliest until size_limit is met. All empty files will be deleted. 3. If wal_ttl_seconds is not 0 and wal_size_limit_mb is 0, then WAL files will be checked every wal_ttl_secondsi / 2 and those that are older than wal_ttl_seconds will be deleted. 4. If both are not 0, WAL files will be checked every 10 min and both checks will be performed with ttl being first. | *Type:* ``int`` | *Default:* ``0`` .. py:attribute:: manifest_preallocation_size Number of bytes to preallocate (via fallocate) the manifest files. Default is 4mb, which is reasonable to reduce random IO as well as prevent overallocation for mounts that preallocate large amounts of data (such as xfs's allocsize option). | *Type:* ``int`` | *Default:* ``4194304`` .. py:attribute:: purge_redundant_kvs_while_flush Purge duplicate/deleted keys when a memtable is flushed to storage. | *Type:* ``bool`` | *Default:* ``True`` .. py:attribute:: allow_mmap_reads Allow the OS to mmap file for reading sst tables | *Type:* ``bool`` | *Default:* ``True`` .. py:attribute:: allow_mmap_writes Allow the OS to mmap file for writing | *Type:* ``bool`` | *Default:* ``False`` .. py:attribute:: is_fd_close_on_exec Disable child process inherit open files | *Type:* ``bool`` | *Default:* ``True`` .. py:attribute:: skip_log_error_on_recovery Skip log corruption error on recovery (If client is ok with losing most recent changes) | *Type:* ``bool`` | *Default:* ``False`` .. py:attribute:: stats_dump_period_sec If not zero, dump rocksdb.stats to LOG every stats_dump_period_sec | *Type:* ``int`` | *Default:* ``3600`` .. py:attribute:: advise_random_on_open If set true, will hint the underlying file system that the file access pattern is random, when a sst file is opened. | *Type:* ``bool`` | *Default:* ``True`` .. py:attribute:: use_adaptive_mutex Use adaptive mutex, which spins in the user space before resorting to kernel. This could reduce context switch when the mutex is not heavily contended. However, if the mutex is hot, we could end up wasting spin time. | *Type:* ``bool`` | *Default:* ``False`` .. py:attribute:: bytes_per_sync Allows OS to incrementally sync files to disk while they are being written, asynchronously, in the background. Issue one request for every bytes_per_sync written. 0 turns it off. | *Type:* ``int`` | *Default:* ``0`` .. py:attribute:: compaction_style The compaction style. Could be set to ``"level"`` to use level-style compaction. For universal-style compaction use ``"universal"``. For FIFO compaction use ``"fifo"``. 
If no compaction style use ``"none"``. | *Type:* ``string`` | *Default:* ``level`` .. py:attribute:: compaction_pri If level compaction_style = kCompactionStyleLevel, for each level, which files are prioritized to be picked to compact. | *Type:* Member of :py:class:`rocksdb.CompactionPri` | *Default:* :py:attr:`rocksdb.CompactionPri.kByCompensatedSize` .. py:attribute:: compaction_options_universal Options to use for universal-style compaction. They make only sense if :py:attr:`rocksdb.Options.compaction_style` is set to ``"universal"``. It is a dict with the following keys. * ``size_ratio``: Percentage flexibilty while comparing file size. If the candidate file(s) size is 1% smaller than the next file's size, then include next file into this candidate set. Default: ``1`` * ``min_merge_width``: The minimum number of files in a single compaction run. Default: ``2`` * ``max_merge_width``: The maximum number of files in a single compaction run. Default: ``UINT_MAX`` * ``max_size_amplification_percent``: The size amplification is defined as the amount (in percentage) of additional storage needed to store a single byte of data in the database. For example, a size amplification of 2% means that a database that contains 100 bytes of user-data may occupy upto 102 bytes of physical storage. By this definition, a fully compacted database has a size amplification of 0%. Rocksdb uses the following heuristic to calculate size amplification: it assumes that all files excluding the earliest file contribute to the size amplification. Default: ``200``, which means that a 100 byte database could require upto 300 bytes of storage. * ``compression_size_percent``: If this option is set to be -1 (the default value), all the output files will follow compression type specified. If this option is not negative, we will try to make sure compressed size is just above this value. In normal cases, at least this percentage of data will be compressed. When we are compacting to a new file, here is the criteria whether it needs to be compressed: assuming here are the list of files sorted by generation time: ``A1...An B1...Bm C1...Ct`` where ``A1`` is the newest and ``Ct`` is the oldest, and we are going to compact ``B1...Bm``, we calculate the total size of all the files as total_size, as well as the total size of ``C1...Ct`` as ``total_C``, the compaction output file will be compressed if ``total_C / total_size < this percentage``. Default: -1 * ``stop_style``: The algorithm used to stop picking files into a single compaction. Can be either ``"similar_size"`` or ``"total_size"``. * ``similar_size``: Pick files of similar size. * ``total_size``: Total size of picked files is greater than next file. Default: ``"total_size"`` For setting options, just assign a dict with the fields to set. It is allowed to omit keys in this dict. Missing keys are just not set to the underlying options object. This example just changes the stop_style and leaves the other options untouched. :: opts = rocksdb.Options() opts.compaction_options_universal = {'stop_style': 'similar_size'} .. py:attribute:: max_sequential_skip_in_iterations An iteration->Next() sequentially skips over keys with the same user-key unless this option is set. This number specifies the number of keys (with the same userkey) that will be sequentially skipped before a reseek is issued. | *Type:* ``int`` | *Default:* ``8`` .. py:attribute:: memtable_factory This is a factory that provides MemTableRep objects. Right now you can assing instances of the following classes. 
* :py:class:`rocksdb.VectorMemtableFactory` * :py:class:`rocksdb.SkipListMemtableFactory` * :py:class:`rocksdb.HashSkipListMemtableFactory` * :py:class:`rocksdb.HashLinkListMemtableFactory` *Default:* :py:class:`rocksdb.SkipListMemtableFactory` .. py:attribute:: table_factory Factory for the files forming the persisten data storage. Sometimes they are also named SST-Files. Right now you can assign instances of the following classes. * :py:class:`rocksdb.BlockBasedTableFactory` * :py:class:`rocksdb.PlainTableFactory` * :py:class:`rocksdb.TotalOrderPlainTableFactory` *Default:* :py:class:`rocksdb.BlockBasedTableFactory` .. py:attribute:: inplace_update_support Allows thread-safe inplace updates. Requires Updates if * key exists in current memtable * new sizeof(new_value) <= sizeof(old_value) * old_value for that key is a put i.e. kTypeValue | *Type:* ``bool`` | *Default:* ``False`` .. py:attribute:: inplace_update_num_locks | Number of locks used for inplace update. | Default: 10000, if inplace_update_support = true, else 0. | *Type:* ``int`` | *Default:* ``10000`` .. py:attribute:: comparator Comparator used to define the order of keys in the table. A python comparator must implement the :py:class:`rocksdb.interfaces.Comparator` interface. *Requires*: The client must ensure that the comparator supplied here has the same name and orders keys *exactly* the same as the comparator provided to previous open calls on the same DB. *Default:* :py:class:`rocksdb.BytewiseComparator` .. py:attribute:: merge_operator The client must provide a merge operator if Merge operation needs to be accessed. Calling Merge on a DB without a merge operator would result in :py:exc:`rocksdb.errors.NotSupported`. The client must ensure that the merge operator supplied here has the same name and *exactly* the same semantics as the merge operator provided to previous open calls on the same DB. The only exception is reserved for upgrade, where a DB previously without a merge operator is introduced to Merge operation for the first time. It's necessary to specify a merge operator when openning the DB in this case. A python merge operator must implement the :py:class:`rocksdb.interfaces.MergeOperator` or :py:class:`rocksdb.interfaces.AssociativeMergeOperator` interface. *Default:* ``None`` .. py:attribute:: prefix_extractor If not ``None``, use the specified function to determine the prefixes for keys. These prefixes will be placed in the filter. Depending on the workload, this can reduce the number of read-IOP cost for scans when a prefix is passed to the calls generating an iterator (:py:meth:`rocksdb.DB.iterkeys` ...). A python prefix_extractor must implement the :py:class:`rocksdb.interfaces.SliceTransform` interface For prefix filtering to work properly, "prefix_extractor" and "comparator" must be such that the following properties hold: 1. ``key.starts_with(prefix(key))`` 2. ``compare(prefix(key), key) <= 0`` 3. ``If compare(k1, k2) <= 0, then compare(prefix(k1), prefix(k2)) <= 0`` 4. ``prefix(prefix(key)) == prefix(key)`` *Default:* ``None`` .. py:attribute:: row_cache A global cache for table-level rows. If ``None`` this cache is not used. Otherwise it must be an instance of :py:class:`rocksdb.LRUCache` *Default:* ``None`` CompactionPri ================ .. py:class:: rocksdb.CompactionPri Defines the support compression types .. py:attribute:: kByCompensatedSize .. py:attribute:: kOldestLargestSeqFirst .. py:attribute:: kOldestSmallestSeqFirst .. 
BytewiseComparator
==================

.. py:class:: rocksdb.BytewiseComparator

    Wraps the rocksdb BytewiseComparator; it uses lexicographic byte-wise
    ordering.

BloomFilterPolicy
=================

.. py:class:: rocksdb.BloomFilterPolicy

    Wraps the rocksdb BloomFilterPolicy.

    .. py:method:: __init__(bits_per_key)

        :param int bits_per_key:
            Specifies the approximate number of bits per key. A good value for
            bits_per_key is 10, which yields a filter with ~ 1% false positive
            rate.

LRUCache
========

.. py:class:: rocksdb.LRUCache

    Wraps the rocksdb LRUCache.

    .. py:method:: __init__(capacity, shard_bits=None)

        Create a new cache with a fixed size capacity (in bytes).
        The cache is sharded into ``2^shard_bits`` shards, by hash of the key.
        The total capacity is divided and evenly assigned to each shard.

.. _table_factories_label:

TableFactories
==============

Currently RocksDB supports two types of tables: plain table and block-based
table. Instances of these classes can be assigned to
:py:attr:`rocksdb.Options.table_factory`.

* *Block-based table:* This is the default table type that RocksDB inherited
  from LevelDB. It was designed for storing data on hard disks or flash
  devices.

* *Plain table:* It is one of RocksDB's SST file formats, optimized for low
  query latency on pure-memory or really low-latency media.

A tutorial of RocksDB table formats is available here:
https://github.com/facebook/rocksdb/wiki/A-Tutorial-of-RocksDB-SST-formats

.. py:class:: rocksdb.BlockBasedTableFactory

    Wraps the BlockBasedTableFactory of RocksDB.

    .. py:method:: __init__(index_type='binary_search', hash_index_allow_collision=True, checksum='crc32', block_cache=None, block_cache_compressed=None, filter_policy=None, no_block_cache=False, block_size=None, block_size_deviation=None, block_restart_interval=None, whole_key_filtering=None)

        :param string index_type:
            * ``binary_search``: a space-efficient index block that is
              optimized for binary-search-based index lookup.
            * ``hash_search``: the hash index. If enabled, a hash lookup is
              done when ``Options.prefix_extractor`` is provided.

        :param bool hash_index_allow_collision:
            Influences the behavior when ``hash_search`` is used.
            If ``False``, stores a precise prefix to block range mapping.
            If ``True``, does not store the prefix and allows prefix hash
            collisions (less memory consumption).

        :param string checksum:
            Use the specified checksum type. Newly created table files will be
            protected with this checksum type. Old table files will still be
            readable, even though they have a different checksum type.
            Can be either ``crc32`` or ``xxhash``.

        :param block_cache:
            Control over blocks (user data is stored in a set of blocks, and a
            block is the unit of reading from disk).

            If ``None``, rocksdb will automatically create and use an 8MB
            internal cache. If not ``None`` use the specified cache for
            blocks. In that case it must be an instance of
            :py:class:`rocksdb.LRUCache`.

        :param block_cache_compressed:
            If ``None``, rocksdb will not use a compressed block cache.
            If not ``None`` use the specified cache for compressed blocks.
            In that case it must be an instance of
            :py:class:`rocksdb.LRUCache`.

        :param filter_policy:
            If not ``None`` use the specified filter policy to reduce disk
            reads. A python filter policy must implement the
            :py:class:`rocksdb.interfaces.FilterPolicy` interface.
            An instance of :py:class:`rocksdb.BloomFilterPolicy` is
            recommended.

        :param bool no_block_cache:
            Disable the block cache. If this is set to true, then no block
            cache should be used, and block_cache should point to ``None``.

        :param int block_size:
            If set to ``None`` the rocksdb default of ``4096`` is used.
            Approximate size of user data packed per block. Note that the
            block size specified here corresponds to uncompressed data. The
            actual size of the unit read from disk may be smaller if
            compression is enabled. This parameter can be changed dynamically.

        :param int block_size_deviation:
            If set to ``None`` the rocksdb default of ``10`` is used.
            This is used to close a block before it reaches the configured
            'block_size'. If the percentage of free space in the current block
            is less than this specified number and adding a new record to the
            block will exceed the configured block size, then this block will
            be closed and the new record will be written to the next block.

        :param int block_restart_interval:
            If set to ``None`` the rocksdb default of ``16`` is used.
            Number of keys between restart points for delta encoding of keys.
            This parameter can be changed dynamically. Most clients should
            leave this parameter alone.

        :param bool whole_key_filtering:
            If set to ``None`` the rocksdb default of ``True`` is used.
            If ``True``, place whole keys in the filter (not just prefixes).
            This must generally be true for gets to be efficient.
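
For illustration, a block-based table factory is typically combined with a
bloom filter and dedicated caches, as in the tutorial's "production ready"
example::

    opts = rocksdb.Options(create_if_missing=True)
    opts.table_factory = rocksdb.BlockBasedTableFactory(
        filter_policy=rocksdb.BloomFilterPolicy(10),
        block_cache=rocksdb.LRUCache(2 * (1024 ** 3)),
        block_cache_compressed=rocksdb.LRUCache(500 * (1024 ** 2)))
    db = rocksdb.DB('test.db', opts)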
.. py:class:: rocksdb.PlainTableFactory

    Plain table with prefix-only seek. It wraps the rocksdb PlainTableFactory.

    For this factory, you need to set
    :py:attr:`rocksdb.Options.prefix_extractor` properly to make it work.
    Look-up will start with a prefix hash lookup for the key prefix. Inside
    the hash bucket found, a binary search is executed for hash conflicts.
    Finally, a linear search is used.

    .. py:method:: __init__(user_key_len=0, bloom_bits_per_key=10, hash_table_ratio=0.75, index_sparseness=10, huge_page_tlb_size=0, encoding_type='plain', full_scan_mode=False, store_index_in_file=False)

        :param int user_key_len:
            Plain table has an optimization for fixed-size keys, which can be
            specified via user_key_len. Alternatively, you can pass ``0`` if
            your keys have variable lengths.

        :param int bloom_bits_per_key:
            The number of bits used for the bloom filter per prefix.
            You may disable it by passing ``0``.

        :param float hash_table_ratio:
            The desired utilization of the hash table used for prefix hashing.
            hash_table_ratio = number of prefixes / #buckets in the hash
            table.

        :param int index_sparseness:
            Inside each prefix, an index record is built for every
            ``index_sparseness`` keys, to support binary search inside each
            hash bucket. For encoding type ``prefix``, the value will also be
            used when writing to determine an interval at which the full key
            is rewritten. It is treated as a suggestion and satisfied when
            possible.

        :param int huge_page_tlb_size:
            If <= 0, allocate hash indexes and bloom filters from malloc;
            otherwise from huge page TLB. The user needs to reserve huge pages
            for it to be allocated, like ``sysctl -w vm.nr_hugepages=20``.
            See the Linux kernel documentation,
            Documentation/vm/hugetlbpage.txt.

        :param string encoding_type:
            How to encode the keys. The value will determine how to encode
            keys when writing to a new SST file. This value will be stored
            inside the SST file and used when reading from the file, which
            makes it possible for users to choose a different encoding type
            when reopening a DB. Files with different encoding types can
            co-exist in the same DB and can be read.

            * ``plain``: Always write full keys without any special encoding.
            * ``prefix``: Find opportunities to write the same prefix once for
              multiple rows. In some cases, when a key follows a previous key
              with the same prefix, instead of writing out the full key, it
              just writes out the size of the shared prefix, as well as other
              bytes, to save some bytes.

              When using this option, the user is required to use the same
              prefix extractor to make sure the same prefix will be extracted
              from the same key. The Name() value of the prefix extractor will
              be stored in the file. When reopening the file, the name of the
              options.prefix_extractor given will be bitwise compared to the
              prefix extractors stored in the file. An error will be returned
              if the two don't match.

        :param bool full_scan_mode:
            Mode for reading the whole file one record at a time, without
            using the index.

        :param bool store_index_in_file:
            Compute the plain table index and bloom filter during file
            building and store them in the file. When reading the file, the
            index will be mmapped instead of recomputed.
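
A minimal sketch of enabling the plain-table format (``StaticPrefix`` here is
a placeholder for any :py:class:`rocksdb.interfaces.SliceTransform`
implementation, such as the one shown in the tutorial)::

    opts = rocksdb.Options(create_if_missing=True)
    opts.prefix_extractor = StaticPrefix()  # required for PlainTableFactory
    opts.table_factory = rocksdb.PlainTableFactory()
    db = rocksdb.DB('test.db', opts)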
.. _memtable_factories_label:

MemtableFactories
=================

RocksDB has different classes to represent the in-memory buffer for the
current operations. You have to assign instances of the following classes to
:py:attr:`rocksdb.Options.memtable_factory`.
This page has a comparison of the most popular ones:
https://github.com/facebook/rocksdb/wiki/Hash-based-memtable-implementations

.. py:class:: rocksdb.VectorMemtableFactory

    This creates MemTableReps that are backed by an std::vector.
    On iteration, the vector is sorted. This is useful for workloads where
    iteration is very rare and writes are generally not issued after reads
    begin.

    .. py:method:: __init__(count=0)

        :param int count:
            Passed to the constructor of the underlying std::vector of each
            VectorRep. On initialization, the underlying array will have at
            least ``count`` bytes reserved for usage.

.. py:class:: rocksdb.SkipListMemtableFactory

    This uses a skip list to store keys.

    .. py:method:: __init__()

.. py:class:: rocksdb.HashSkipListMemtableFactory

    This class contains a fixed array of buckets, each pointing to a skiplist
    (null if the bucket is empty).

    .. note::

        :py:attr:`rocksdb.Options.prefix_extractor` must be set, otherwise
        rocksdb falls back to skip-list.

    .. py:method:: __init__(bucket_count=1000000, skiplist_height=4, skiplist_branching_factor=4)

        :param int bucket_count: number of fixed array buckets
        :param int skiplist_height: the max height of the skiplist
        :param int skiplist_branching_factor:
            probabilistic size ratio between adjacent link lists in the
            skiplist

.. py:class:: rocksdb.HashLinkListMemtableFactory

    This factory creates memtables based on a hashed linked list.
    It contains a fixed array of buckets, each pointing to a sorted single
    linked list (null if the bucket is empty).

    .. note::

        :py:attr:`rocksdb.Options.prefix_extractor` must be set, otherwise
        rocksdb falls back to skip-list.

    ..
py:method:: __init__(bucket_count=50000) :param int bucket: number of fixed array buckets python-rocksdb-0.8.0~rc3/docs/changelog.rst000066400000000000000000000165661400433636700207300ustar00rootroot00000000000000Changelog ********* Version 0.8 ----------- Yet Another Fork, started by @NightTsarina, collecting loose commits from the many forks of the original project. Summary of commits: [ Alexander Böhn ] * Allow `rocksdb.DB` instances to be manually closed. [ iFA ] * Many tidying changes. * Added support for many parameters in different interfaces. * Create statistics.pxd * Fixing closing [ Andrey Martyanov ] * Build wheel packages * Update README with simplified installation procedure [ Martina Ferrari ] * Fix a few typos. * Add as_dict option to multi_get. * Update README, set myself as current author/maintainer, and move most of setup.py to the configuration file. Version 0.7 ----------- Version released by @twmht; summary of commits: [ Ming-Hsuan-Tu ] * remove full_scan_mode * change default compaction_pri [ meridianz ] * Docs: fix typo in installation command line [ Roman Zeyde ] * Remove `fetch=False` unsupported keyword from db.iter{items,keys,values} documentation [ Abhiram R ] * Modified docs to export CPLUS_INCLUDE_PATH, LD_LIBRARY_PATH and LIBRARY_PATH correctly even if they weren't originally assigned * Added liblz4-dev as a package to be installed [ Jason Fried ] * Column Family Support. Add support for Column Families in a runtime safe way. Add unittests to test functionality Insure all unittests are passing. Cleaned up unittests to not use a fixed directory in tmp, but use tempfile Version 0.6 ----------- Version released by @twmht; summary of commits: [ Ming-Hsuan-Tu ] * now support rocksdb 5.3.0 * Merge options source_compaction_factor, max_grandparent_overlap_bytes and expanded_compaction_factor into max_compaction_bytes * add default merge operator * add compaction_pri * add seekForPrev * update the usage of default operators * fix memtable_factory crash * add testcase for memtable [ George Mossessian ] * allow snappy_compression as a default option in test_options.py::TestOptions::test_simple [ RIMPY BHAROT ] * Update installation.rst. Missing steps need to be added for clean installation. [ Chris Hager ] * OSX support for 'pip install' [ Mehdi Abaakouk ] * Allow to compile the extension everywhere. Version 0.5 ----------- Last version released by the @hofmockel; summary of commits: * Remove prints from the tests. * Use another compiler flag wich works for clang and gcc. * Wrap the RepairDB function. * Get rid of this 'extension_defaults' variable. * Only 'cythonize' if Cython is installed. * Add the .hpp .pxd .pyx files for the sdist. * Rename README.md to README.rst so setup.py can pick it up. * Update the installation page by mentioning a 'system wide' rocksdb installation. * Improve the README.rst by adding a quick install/using guide. * Don't set a theme explicitly. Let 'readthedocs' decide itself. * Change API of compact_range to be compatible with the change of rocksdb. * No need for the 'get_ob' methods on PyCache. * Add "row_cache" to options. * Document the new row_cache option. * Update the versions (python,rocksdb) pyrocksdb 0.4 was tested with. * Mention in the changelog that this version is avaialable on pypi. Version 0.4 ----------- This version works with RocksDB v3.12. * Added :py:func:`repair_db`. * Added :py:meth:`rocksdb.Options.row_cache` * Publish to pypi. 
Backward Incompatible Changes: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ * Changed API of :py:meth:`rocksdb.DB.compact_range`. * Only allow keyword arguments. * Changed ``reduce_level`` to ``change_level``. * Add new argument called ``bottommost_level_compaction``. Version 0.3 ----------- This version works with RocksDB version v3.11. Backward Incompatible Changes: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ **Prefix Seeks:** According to this page https://github.com/facebook/rocksdb/wiki/Prefix-Seek-API-Changes, all the prefix related parameters on ``ReadOptions`` are removed. Rocksdb realizes now if ``Options.prefix_extractor`` is set and uses then prefix-seeks automatically. This means the following changes on pyrocksdb. * DB.iterkeys, DB.itervalues, DB.iteritems have *no* ``prefix`` parameter anymore. * DB.get, DB.multi_get, DB.key_may_exist, DB.iterkeys, DB.itervalues, DB.iteritems have *no* ``prefix_seek`` parameter anymore. Which means all the iterators walk now always to the *end* of the database. So if you need to stay within a prefix, write your own code to ensure that. For DB.iterkeys and DB.iteritems ``itertools.takewhile`` is a possible solution. :: from itertools import takewhile it = self.db.iterkeys() it.seek(b'00002') print list(takewhile(lambda key: key.startswith(b'00002'), it)) it = self.db.iteritems() it.seek(b'00002') print dict(takewhile(lambda item: item[0].startswith(b'00002'), it)) **SST Table Builders:** * Removed ``NewTotalOrderPlainTableFactory``, because rocksdb drops it too. **Changed Options:** In newer versions of rocksdb a bunch of options were moved or removed. * Rename ``bloom_bits_per_prefix`` of :py:class:`rocksdb.PlainTableFactory` to ``bloom_bits_per_key`` * Removed ``Options.db_stats_log_interval``. * Removed ``Options.disable_seek_compaction`` * Moved ``Options.no_block_cache`` to ``BlockBasedTableFactory`` * Moved ``Options.block_size`` to ``BlockBasedTableFactory`` * Moved ``Options.block_size_deviation`` to ``BlockBasedTableFactory`` * Moved ``Options.block_restart_interval`` to ``BlockBasedTableFactory`` * Moved ``Options.whole_key_filtering`` to ``BlockBasedTableFactory`` * Removed ``Options.table_cache_remove_scan_count_limit`` * Removed rm_scan_count_limit from ``LRUCache`` New: ^^^^ * Make CompactRange available: :py:meth:`rocksdb.DB.compact_range` * Add init options to :py:class:`rocksdb.BlockBasedTableFactory` * Add more option to :py:class:`rocksdb.PlainTableFactory` * Add :py:class:`rocksdb.WriteBatchIterator` * add :py:attr:`rocksdb.CompressionType.lz4_compression` * add :py:attr:`rocksdb.CompressionType.lz4hc_compression` Version 0.2 ----------- This version works with RocksDB version 2.8.fb. Now you have access to the more advanced options of rocksdb. Like changing the memtable or SST representation. It is also possible now to enable *Universal Style Compaction*. * Fixed `issue 3 `_. Which fixed the change of prefix_extractor from raw-pointer to smart-pointer. * Support the new :py:attr:`rocksdb.Options.verify_checksums_in_compaction` option. * Add :py:attr:`rocksdb.Options.table_factory` option. So you could use the new 'PlainTableFactories' which are optimized for in-memory-databases. * https://github.com/facebook/rocksdb/wiki/PlainTable-Format * https://github.com/facebook/rocksdb/wiki/How-to-persist-in-memory-RocksDB-database%3F * Add :py:attr:`rocksdb.Options.memtable_factory` option. * Add options :py:attr:`rocksdb.Options.compaction_style` and :py:attr:`rocksdb.Options.compaction_options_universal` to change the compaction style. 
* Update documentation to the new default values * allow_mmap_reads=true * allow_mmap_writes=false * max_background_flushes=1 * max_open_files=5000 * paranoid_checks=true * disable_seek_compaction=true * level0_stop_writes_trigger=24 * level0_slowdown_writes_trigger=20 * Document new property names for :py:meth:`rocksdb.DB.get_property`. Version 0.1 ----------- Initial version. Works with rocksdb version 2.7.fb. python-rocksdb-0.8.0~rc3/docs/conf.py000066400000000000000000000200721400433636700175310ustar00rootroot00000000000000# -*- coding: utf-8 -*- # # python-rocksdb documentation build configuration file, created by # sphinx-quickstart on Tue Dec 31 12:50:54 2013. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys import os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.todo', 'sphinx.ext.viewcode', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'python-rocksdb' copyright = u'2014, sh' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '0.6' # The full version, including alpha/beta/rc tags. release = '0.6.7' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all # documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. 
#keep_warnings = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. #html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'python-rocksdbdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). 
latex_documents = [ ('index', 'python-rocksdb.tex', u'python-rocksdb Documentation', u'sh', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'python-rocksdb', u'python-rocksdb Documentation', [u'sh'], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'python-rocksdb', u'python-rocksdb Documentation', u'sh', 'python-rocksdb', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. #texinfo_no_detailmenu = False python-rocksdb-0.8.0~rc3/docs/index.rst000066400000000000000000000017171400433636700201000ustar00rootroot00000000000000Welcome to python-rocksdb's documentation! ========================================== Overview -------- Python bindings to the C++ interface of http://rocksdb.org/ using cython:: import rocksdb db = rocksdb.DB("test.db", rocksdb.Options(create_if_missing=True)) db.put(b"a", b"b") print db.get(b"a") Tested with python3.8 and python3.9 and RocksDB version 6.11.4. .. toctree:: :maxdepth: 2 Instructions how to install Tutorial API Changelog Contributing ------------ Source can be found on `github `_. Feel free to fork and send pull-requests or create issues on the `github issue tracker `_ RoadMap/TODO ------------ No plans so far. Please submit wishes to the github issues. Indices and tables ================== * :ref:`genindex` * :ref:`modindex` * :ref:`search` python-rocksdb-0.8.0~rc3/docs/installation.rst000066400000000000000000000032351400433636700214670ustar00rootroot00000000000000Installing ========== .. highlight:: bash With distro package and pypi **************************** This requires librocksdb-dev>=5.0 .. code-block:: bash apt-get install python-virtualenv python-dev librocksdb-dev virtualenv venv source venv/bin/activate pip install python-rocksdb From source *********** Building rocksdb ---------------- Briefly describes how to build rocksdb under an ordinary debian/ubuntu. For more details consider https://github.com/facebook/rocksdb/blob/master/INSTALL.md .. code-block:: bash apt-get install build-essential libsnappy-dev zlib1g-dev libbz2-dev libgflags-dev git clone https://github.com/facebook/rocksdb.git cd rocksdb mkdir build && cd build cmake .. 
make

Systemwide rocksdb
^^^^^^^^^^^^^^^^^^

The following command installs the shared library in ``/usr/lib/`` and the
header files in ``/usr/include/rocksdb/``::

  make install-shared INSTALL_PATH=/usr

To uninstall use::

  make uninstall INSTALL_PATH=/usr

Local rocksdb
^^^^^^^^^^^^^

If you don't want a system-wide installation, or you don't have the
permissions, it is possible to set the following environment variables.
These variables are picked up by the compiler, linker and loader.

.. code-block:: bash

  export CPLUS_INCLUDE_PATH=${CPLUS_INCLUDE_PATH}:`pwd`/../include
  export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:`pwd`
  export LIBRARY_PATH=${LIBRARY_PATH}:`pwd`

Building python-rocksdb
-----------------------

.. code-block:: bash

  apt-get install python-virtualenv python-dev
  virtualenv venv
  source venv/bin/activate
  pip install git+git://github.com/NightTsarina/python-rocksdb.git#egg=python-rocksdb

python-rocksdb-0.8.0~rc3/docs/tutorial/000077500000000000000000000000001400433636700200745ustar00rootroot00000000000000python-rocksdb-0.8.0~rc3/docs/tutorial/index.rst000066400000000000000000000260071400433636700217420ustar00rootroot00000000000000
Basic Usage of python-rocksdb
*****************************

Open
====

The most basic open call is ::

    import rocksdb

    db = rocksdb.DB("test.db", rocksdb.Options(create_if_missing=True))

A more production ready open can look like this ::

    import rocksdb

    opts = rocksdb.Options()
    opts.create_if_missing = True
    opts.max_open_files = 300000
    opts.write_buffer_size = 67108864
    opts.max_write_buffer_number = 3
    opts.target_file_size_base = 67108864
    opts.table_factory = rocksdb.BlockBasedTableFactory(
        filter_policy=rocksdb.BloomFilterPolicy(10),
        block_cache=rocksdb.LRUCache(2 * (1024 ** 3)),
        block_cache_compressed=rocksdb.LRUCache(500 * (1024 ** 2)))

    db = rocksdb.DB("test.db", opts)

It assigns a cache of 2.5 GB, uses a bloom filter for faster lookups and keeps
more data (64 MB) in memory before writing a .sst file.

About Bytes And Unicode
=======================

RocksDB stores all data as uninterpreted *byte strings*.
pyrocksdb behaves the same and uses byte strings nearly everywhere too.
In python2 this is the ``str`` type. In python3 the ``bytes`` type.

Since the default string type for string literals differs between python 2
and 3, it is strongly recommended to use an explicit ``b`` prefix for all
byte string literals in both python2 and python3 code.
For example ``b'this is a byte string'``. This avoids ambiguity and ensures
that your code keeps working as intended if you switch between python2 and
python3.

The only places where you can pass unicode objects are filesystem paths, like

* Directory name of the database itself :py:meth:`rocksdb.DB.__init__`
* :py:attr:`rocksdb.Options.wal_dir`
* :py:attr:`rocksdb.Options.db_log_dir`

To encode these path names, the `sys.getfilesystemencoding()` encoding is used.

Access
======

Store, Get, Delete is straightforward ::

    # Store
    db.put(b"key", b"value")

    # Get
    db.get(b"key")

    # Delete
    db.delete(b"key")

It is also possible to gather modifications and apply them in a single
operation ::

    batch = rocksdb.WriteBatch()
    batch.put(b"key", b"v1")
    batch.delete(b"key")
    batch.put(b"key", b"v2")
    batch.put(b"key", b"v3")

    db.write(batch)

Fetching multiple values at once ::

    db.put(b"key1", b"v1")
    db.put(b"key2", b"v2")

    ret = db.multi_get([b"key1", b"key2", b"key3"])

    # prints b"v1"
    print ret[b"key1"]

    # prints None
    print ret[b"key3"]

Iteration
=========

Iterators behave slightly differently than expected. Per default they are not valid.
So you have to call one of its seek methods first :: db.put(b"key1", b"v1") db.put(b"key2", b"v2") db.put(b"key3", b"v3") it = db.iterkeys() it.seek_to_first() # prints [b'key1', b'key2', b'key3'] print list(it) it.seek_to_last() # prints [b'key3'] print list(it) it.seek(b'key2') # prints [b'key2', b'key3'] print list(it) There are also methods to iterate over values/items :: it = db.itervalues() it.seek_to_first() # prints [b'v1', b'v2', b'v3'] print list(it) it = db.iteritems() it.seek_to_first() # prints [(b'key1', b'v1'), (b'key2, b'v2'), (b'key3', b'v3')] print list(it) Reversed iteration :: it = db.iteritems() it.seek_to_last() # prints [(b'key3', b'v3'), (b'key2', b'v2'), (b'key1', b'v1')] print list(reversed(it)) SeekForPrev (Take the example from `https://github.com/facebook/rocksdb/wiki/SeekForPrev`):: db.put(b'a1', b'a1_value') db.put(b'a3', b'a3_value') db.put(b'b1', b'b1_value') db.put(b'b2', b'b2_value') db.put(b'c2', b'c2_value') db.put(b'c4', b'c4_value') it = db.iteritems() it.seek(b'a1') assertEqual(it.get(), (b'a1', b'a1_value')) it.seek(b'a3') assertEqual(it.get(), (b'a3', b'a3_value')) it.seek_for_prev(b'c4') assertEqual(it.get(), (b'c4', b'c4_value')) it.seek_for_prev(b'c3') assertEqual(it.get(), (b'c2', b'c2_value')) Snapshots ========= Snapshots are nice to get a consistent view on the database :: self.db.put(b"a", b"1") self.db.put(b"b", b"2") snapshot = self.db.snapshot() self.db.put(b"a", b"2") self.db.delete(b"b") it = self.db.iteritems() it.seek_to_first() # prints {b'a': b'2'} print dict(it) it = self.db.iteritems(snapshot=snapshot) it.seek_to_first() # prints {b'a': b'1', b'b': b'2'} print dict(it) MergeOperator ============= Merge operators are useful for efficient read-modify-write operations. For more details see `Merge Operator `_ A python merge operator must either implement the :py:class:`rocksdb.interfaces.AssociativeMergeOperator` or :py:class:`rocksdb.interfaces.MergeOperator` interface. The following example python merge operator implements a counter :: class AssocCounter(rocksdb.interfaces.AssociativeMergeOperator): def merge(self, key, existing_value, value): if existing_value: s = int(existing_value) + int(value) return (True, str(s).encode('ascii')) return (True, value) def name(self): return b'AssocCounter' opts = rocksdb.Options() opts.create_if_missing = True opts.merge_operator = AssocCounter() db = rocksdb.DB('test.db', opts) db.merge(b"a", b"1") db.merge(b"a", b"1") # prints b'2' print db.get(b"a") We provide a set of default operators ``rocksdb.merge_operators.UintAddOperator`` and ``rocksdb.merge_operators.StringAppendOperator``:: from rocksdb.merge_operators import UintAddOperator, StringAppendOperator opts = rocksdb.Options() opts.create_if_missing = True # you should also play with StringAppendOperator opts.merge_operator = UintAddOperator() db = rocksdb.DB('/tmp/test', opts) self.db.put(b'a', struct.pack('Q', 5566)) for x in range(1000): self.db.merge(b"a", struct.pack('Q', x)) self.assertEqual(5566 + sum(range(1000)), struct.unpack('Q', self.db.get(b'a'))[0]) PrefixExtractor =============== According to `Prefix API `_ a prefix_extractor can reduce IO for scans within a prefix range. A python prefix extractor must implement the :py:class:`rocksdb.interfaces.SliceTransform` interface. The following example presents a prefix extractor of a static size. 
So always the first 5 bytes are used as the prefix :: class StaticPrefix(rocksdb.interfaces.SliceTransform): def name(self): return b'static' def transform(self, src): return (0, 5) def in_domain(self, src): return len(src) >= 5 def in_range(self, dst): return len(dst) == 5 opts = rocksdb.Options() opts.create_if_missing=True opts.prefix_extractor = StaticPrefix() db = rocksdb.DB('test.db', opts) db.put(b'00001.x', b'x') db.put(b'00001.y', b'y') db.put(b'00001.z', b'z') db.put(b'00002.x', b'x') db.put(b'00002.y', b'y') db.put(b'00002.z', b'z') db.put(b'00003.x', b'x') db.put(b'00003.y', b'y') db.put(b'00003.z', b'z') prefix = b'00002' it = db.iteritems() it.seek(prefix) # prints {b'00002.z': b'z', b'00002.y': b'y', b'00002.x': b'x'} print dict(itertools.takewhile(lambda item: item[0].startswith(prefix), it)) Backup And Restore ================== Backup and Restore is done with a separate :py:class:`rocksdb.BackupEngine` object. A backup can only be created on a living database object. :: import rocksdb db = rocksdb.DB("test.db", rocksdb.Options(create_if_missing=True)) db.put(b'a', b'v1') db.put(b'b', b'v2') db.put(b'c', b'v3') Backup is created like this. You can choose any path for the backup destination except the db path itself. If ``flush_before_backup`` is ``True`` the current memtable is flushed to disk before backup. :: backup = rocksdb.BackupEngine("test.db/backups") backup.create_backup(db, flush_before_backup=True) Restore is done like this. The two arguments are the db_dir and wal_dir, which are mostly the same. :: backup = rocksdb.BackupEngine("test.db/backups") backup.restore_latest_backup("test.db", "test.db") Change Memtable Or SST Implementations ====================================== As noted here :ref:`memtable_factories_label`, RocksDB offers different implementations for the memtable representation. Per default :py:class:`rocksdb.SkipListMemtableFactory` is used, but changing it to a different one is veary easy. Here is an example for HashSkipList-MemtableFactory. Keep in mind: To use the hashed based MemtableFactories you must set :py:attr:`rocksdb.Options.prefix_extractor`. In this example all keys have a static prefix of len 5. :: class StaticPrefix(rocksdb.interfaces.SliceTransform): def name(self): return b'static' def transform(self, src): return (0, 5) def in_domain(self, src): return len(src) >= 5 def in_range(self, dst): return len(dst) == 5 opts = rocksdb.Options() opts.prefix_extractor = StaticPrefix() opts.allow_concurrent_memtable_write = False opts.memtable_factory = rocksdb.HashSkipListMemtableFactory() opts.create_if_missing = True db = rocksdb.DB("test.db", opts) db.put(b'00001.x', b'x') db.put(b'00001.y', b'y') db.put(b'00002.x', b'x') For initial bulk loads the Vector-MemtableFactory makes sense. :: opts = rocksdb.Options() opts.allow_concurrent_memtable_write = False opts.memtable_factory = rocksdb.VectorMemtableFactory() opts.create_if_missing = True db = rocksdb.DB("test.db", opts) As noted here :ref:`table_factories_label`, it is also possible to change the representation of the final data files. Here is an example how to use a 'PlainTable'. :: opts = rocksdb.Options() opts.table_factory = rocksdb.PlainTableFactory() opts.create_if_missing = True db = rocksdb.DB("test.db", opts) Change Compaction Style ======================= RocksDB has a compaction algorithm called *universal*. This style typically results in lower write amplification but higher space amplification than Level Style Compaction. 
See here for more details, https://github.com/facebook/rocksdb/wiki/Rocksdb-Architecture-Guide#multi-threaded-compactions Here is an example to switch to *universal style compaction*. :: opts = rocksdb.Options() opts.compaction_style = "universal" opts.compaction_options_universal = {"min_merge_width": 3} See here for more options on *universal style compaction*, :py:attr:`rocksdb.Options.compaction_options_universal` Iterate Over WriteBatch ======================= In same cases you need to know, what operations happened on a WriteBatch. The pyrocksdb WriteBatch supports the iterator protocol, see this example. :: batch = rocksdb.WriteBatch() batch.put(b"key1", b"v1") batch.delete(b'a') batch.merge(b'xxx', b'value') for op, key, value in batch: print op, key, value # prints the following three lines # Put key1 v1 # Delete a # Merge xxx value python-rocksdb-0.8.0~rc3/pyproject.toml000066400000000000000000000001321400433636700202110ustar00rootroot00000000000000[build-system] requires = ["setuptools", "wheel"] build-backend = "setuptools.build_meta" python-rocksdb-0.8.0~rc3/rocksdb/000077500000000000000000000000001400433636700167305ustar00rootroot00000000000000python-rocksdb-0.8.0~rc3/rocksdb/__init__.py000066400000000000000000000000301400433636700210320ustar00rootroot00000000000000from ._rocksdb import * python-rocksdb-0.8.0~rc3/rocksdb/_rocksdb.pyx000066400000000000000000002424471400433636700212750ustar00rootroot00000000000000#cython: language_level=3 import cython from libcpp.string cimport string from libcpp.deque cimport deque from libcpp.vector cimport vector from cpython cimport bool as py_bool from libcpp cimport bool as cpp_bool from libc.stdint cimport uint32_t from cython.operator cimport dereference as deref from cpython.bytes cimport PyBytes_AsString from cpython.bytes cimport PyBytes_Size from cpython.bytes cimport PyBytes_FromString from cpython.bytes cimport PyBytes_FromStringAndSize from cpython.unicode cimport PyUnicode_Decode from .std_memory cimport shared_ptr from . cimport options from . cimport merge_operator from . cimport filter_policy from . cimport comparator from . cimport slice_transform from . cimport cache from . cimport logger from . cimport snapshot from . cimport db from . cimport iterator from . cimport backup from . cimport env from . cimport table_factory from . cimport memtablerep from . 
cimport universal_compaction # Enums are the only exception for direct imports # Their name als already unique enough from .universal_compaction cimport kCompactionStopStyleSimilarSize from .universal_compaction cimport kCompactionStopStyleTotalSize from .options cimport kCompactionStyleLevel from .options cimport kCompactionStyleUniversal from .options cimport kCompactionStyleFIFO from .options cimport kCompactionStyleNone from .slice_ cimport Slice from .status cimport Status import sys from .interfaces import MergeOperator as IMergeOperator from .interfaces import AssociativeMergeOperator as IAssociativeMergeOperator from .interfaces import FilterPolicy as IFilterPolicy from .interfaces import Comparator as IComparator from .interfaces import SliceTransform as ISliceTransform import traceback from .errors import NotFound from .errors import Corruption from .errors import NotSupported from .errors import InvalidArgument from .errors import RocksIOError from .errors import MergeInProgress from .errors import Incomplete import weakref ctypedef const filter_policy.FilterPolicy ConstFilterPolicy cdef extern from "cpp/utils.hpp" namespace "py_rocks": cdef const Slice* vector_data(vector[Slice]&) # Prepare python for threaded usage. # Python callbacks (merge, comparator) # could be executed in a rocksdb background thread (eg. compaction). cdef extern from "Python.h": void PyEval_InitThreads() PyEval_InitThreads() ## Here comes the stuff to wrap the status to exception cdef check_status(const Status& st): if st.ok(): return if st.IsNotFound(): raise NotFound(st.ToString()) if st.IsCorruption(): raise Corruption(st.ToString()) if st.IsNotSupported(): raise NotSupported(st.ToString()) if st.IsInvalidArgument(): raise InvalidArgument(st.ToString()) if st.IsIOError(): raise RocksIOError(st.ToString()) if st.IsMergeInProgress(): raise MergeInProgress(st.ToString()) if st.IsIncomplete(): raise Incomplete(st.ToString()) raise Exception("Unknown error: %s" % st.ToString()) ###################################################### cdef string bytes_to_string(path) except *: return string(PyBytes_AsString(path), PyBytes_Size(path)) cdef string_to_bytes(string ob): return PyBytes_FromStringAndSize(ob.c_str(), ob.size()) cdef Slice bytes_to_slice(ob) except *: return Slice(PyBytes_AsString(ob), PyBytes_Size(ob)) cdef slice_to_bytes(Slice sl): return PyBytes_FromStringAndSize(sl.data(), sl.size()) ## only for filsystem paths cdef string path_to_string(object path) except *: if isinstance(path, bytes): return bytes_to_string(path) if isinstance(path, unicode): path = path.encode(sys.getfilesystemencoding()) return bytes_to_string(path) else: raise TypeError("Wrong type for path: %s" % path) cdef object string_to_path(string path): fs_encoding = sys.getfilesystemencoding().encode('ascii') return PyUnicode_Decode(path.c_str(), path.size(), fs_encoding, "replace") ## Here comes the stuff for the comparator @cython.internal cdef class PyComparator(object): cdef object get_ob(self): return None cdef const comparator.Comparator* get_comparator(self): return NULL cdef set_info_log(self, shared_ptr[logger.Logger] info_log): pass @cython.internal cdef class PyGenericComparator(PyComparator): cdef comparator.ComparatorWrapper* comparator_ptr cdef object ob def __cinit__(self, object ob): self.comparator_ptr = NULL if not isinstance(ob, IComparator): raise TypeError("%s is not of type %s" % (ob, IComparator)) self.ob = ob self.comparator_ptr = new comparator.ComparatorWrapper( bytes_to_string(ob.name()), ob, 
compare_callback) def __dealloc__(self): if not self.comparator_ptr == NULL: del self.comparator_ptr cdef object get_ob(self): return self.ob cdef const comparator.Comparator* get_comparator(self): return self.comparator_ptr cdef set_info_log(self, shared_ptr[logger.Logger] info_log): self.comparator_ptr.set_info_log(info_log) @cython.internal cdef class PyBytewiseComparator(PyComparator): cdef const comparator.Comparator* comparator_ptr def __cinit__(self): self.comparator_ptr = comparator.BytewiseComparator() def name(self): return PyBytes_FromString(self.comparator_ptr.Name()) def compare(self, a, b): return self.comparator_ptr.Compare( bytes_to_slice(a), bytes_to_slice(b)) cdef object get_ob(self): return self cdef const comparator.Comparator* get_comparator(self): return self.comparator_ptr cdef int compare_callback( void* ctx, logger.Logger* log, string& error_msg, const Slice& a, const Slice& b) with gil: try: return (ctx).compare(slice_to_bytes(a), slice_to_bytes(b)) except BaseException as error: tb = traceback.format_exc() logger.Log(log, "Error in compare callback: %s", tb) error_msg.assign(str(error)) BytewiseComparator = PyBytewiseComparator ######################################### ## Here comes the stuff for the filter policy @cython.internal cdef class PyFilterPolicy(object): cdef object get_ob(self): return None cdef shared_ptr[ConstFilterPolicy] get_policy(self): return shared_ptr[ConstFilterPolicy]() cdef set_info_log(self, shared_ptr[logger.Logger] info_log): pass @cython.internal cdef class PyGenericFilterPolicy(PyFilterPolicy): cdef shared_ptr[filter_policy.FilterPolicyWrapper] policy cdef object ob def __cinit__(self, object ob): if not isinstance(ob, IFilterPolicy): raise TypeError("%s is not of type %s" % (ob, IFilterPolicy)) self.ob = ob self.policy.reset(new filter_policy.FilterPolicyWrapper( bytes_to_string(ob.name()), ob, create_filter_callback, key_may_match_callback)) cdef object get_ob(self): return self.ob cdef shared_ptr[ConstFilterPolicy] get_policy(self): return (self.policy) cdef set_info_log(self, shared_ptr[logger.Logger] info_log): self.policy.get().set_info_log(info_log) cdef void create_filter_callback( void* ctx, logger.Logger* log, string& error_msg, const Slice* keys, int n, string* dst) with gil: try: ret = (ctx).create_filter( [slice_to_bytes(keys[i]) for i in range(n)]) dst.append(bytes_to_string(ret)) except BaseException as error: tb = traceback.format_exc() logger.Log(log, "Error in create filter callback: %s", tb) error_msg.assign(str(error)) cdef cpp_bool key_may_match_callback( void* ctx, logger.Logger* log, string& error_msg, const Slice& key, const Slice& filt) with gil: try: return (ctx).key_may_match( slice_to_bytes(key), slice_to_bytes(filt)) except BaseException as error: tb = traceback.format_exc() logger.Log(log, "Error in key_mach_match callback: %s", tb) error_msg.assign(str(error)) @cython.internal cdef class PyBloomFilterPolicy(PyFilterPolicy): cdef shared_ptr[ConstFilterPolicy] policy def __cinit__(self, int bits_per_key): self.policy.reset(filter_policy.NewBloomFilterPolicy(bits_per_key)) def name(self): return PyBytes_FromString(self.policy.get().Name()) def create_filter(self, keys): cdef string dst cdef vector[Slice] c_keys for key in keys: c_keys.push_back(bytes_to_slice(key)) self.policy.get().CreateFilter( vector_data(c_keys), c_keys.size(), cython.address(dst)) return string_to_bytes(dst) def key_may_match(self, key, filter_): return self.policy.get().KeyMayMatch( bytes_to_slice(key), bytes_to_slice(filter_)) cdef 
object get_ob(self): return self cdef shared_ptr[ConstFilterPolicy] get_policy(self): return self.policy BloomFilterPolicy = PyBloomFilterPolicy ############################################# ## Here comes the stuff for the merge operator @cython.internal cdef class PyMergeOperator(object): cdef shared_ptr[merge_operator.MergeOperator] merge_op cdef object ob def __cinit__(self, object ob): self.ob = ob if isinstance(ob, IAssociativeMergeOperator): self.merge_op.reset( new merge_operator.AssociativeMergeOperatorWrapper( bytes_to_string(ob.name()), (ob), merge_callback)) elif isinstance(ob, IMergeOperator): self.merge_op.reset( new merge_operator.MergeOperatorWrapper( bytes_to_string(ob.name()), ob, ob, full_merge_callback, partial_merge_callback)) # elif isinstance(ob, str): # if ob == "put": # self.merge_op = merge_operator.MergeOperators.CreatePutOperator() # elif ob == "put_v1": # self.merge_op = merge_operator.MergeOperators.CreateDeprecatedPutOperator() # elif ob == "uint64add": # self.merge_op = merge_operator.MergeOperators.CreateUInt64AddOperator() # elif ob == "stringappend": # self.merge_op = merge_operator.MergeOperators.CreateStringAppendOperator() # #TODO: necessary? # # elif ob == "stringappendtest": # # self.merge_op = merge_operator.MergeOperators.CreateStringAppendTESTOperator() # elif ob == "max": # self.merge_op = merge_operator.MergeOperators.CreateMaxOperator() # else: # msg = "{0} is not the default type".format(ob) # raise TypeError(msg) else: msg = "%s is not of this types %s" msg %= (ob, (IAssociativeMergeOperator, IMergeOperator)) raise TypeError(msg) cdef object get_ob(self): return self.ob cdef shared_ptr[merge_operator.MergeOperator] get_operator(self): return self.merge_op cdef cpp_bool merge_callback( void* ctx, const Slice& key, const Slice* existing_value, const Slice& value, string* new_value, logger.Logger* log) with gil: if existing_value == NULL: py_existing_value = None else: py_existing_value = slice_to_bytes(deref(existing_value)) try: ret = (ctx).merge( slice_to_bytes(key), py_existing_value, slice_to_bytes(value)) if ret[0]: new_value.assign(bytes_to_string(ret[1])) return True return False except: tb = traceback.format_exc() logger.Log(log, "Error in merge_callback: %s", tb) return False cdef cpp_bool full_merge_callback( void* ctx, const Slice& key, const Slice* existing_value, const deque[string]& op_list, string* new_value, logger.Logger* log) with gil: if existing_value == NULL: py_existing_value = None else: py_existing_value = slice_to_bytes(deref(existing_value)) try: ret = (ctx).full_merge( slice_to_bytes(key), py_existing_value, [string_to_bytes(op_list[i]) for i in range(op_list.size())]) if ret[0]: new_value.assign(bytes_to_string(ret[1])) return True return False except: tb = traceback.format_exc() logger.Log(log, "Error in full_merge_callback: %s", tb) return False cdef cpp_bool partial_merge_callback( void* ctx, const Slice& key, const Slice& left_op, const Slice& right_op, string* new_value, logger.Logger* log) with gil: try: ret = (ctx).partial_merge( slice_to_bytes(key), slice_to_bytes(left_op), slice_to_bytes(right_op)) if ret[0]: new_value.assign(bytes_to_string(ret[1])) return True return False except: tb = traceback.format_exc() logger.Log(log, "Error in partial_merge_callback: %s", tb) return False ############################################## #### Here comes the Cache stuff @cython.internal cdef class PyCache(object): cdef shared_ptr[cache.Cache] get_cache(self): return shared_ptr[cache.Cache]() @cython.internal cdef class 
PyLRUCache(PyCache): cdef shared_ptr[cache.Cache] cache_ob def __cinit__(self, capacity, shard_bits=None): if shard_bits is not None: self.cache_ob = cache.NewLRUCache(capacity, shard_bits) else: self.cache_ob = cache.NewLRUCache(capacity) cdef shared_ptr[cache.Cache] get_cache(self): return self.cache_ob LRUCache = PyLRUCache ############################### ### Here comes the stuff for SliceTransform @cython.internal cdef class PySliceTransform(object): cdef shared_ptr[slice_transform.SliceTransform] transfomer cdef object ob def __cinit__(self, object ob): if not isinstance(ob, ISliceTransform): raise TypeError("%s is not of type %s" % (ob, ISliceTransform)) self.ob = ob self.transfomer.reset( new slice_transform.SliceTransformWrapper( bytes_to_string(ob.name()), ob, slice_transform_callback, slice_in_domain_callback, slice_in_range_callback)) cdef object get_ob(self): return self.ob cdef shared_ptr[slice_transform.SliceTransform] get_transformer(self): return self.transfomer cdef set_info_log(self, shared_ptr[logger.Logger] info_log): cdef slice_transform.SliceTransformWrapper* ptr ptr = self.transfomer.get() ptr.set_info_log(info_log) cdef Slice slice_transform_callback( void* ctx, logger.Logger* log, string& error_msg, const Slice& src) with gil: cdef size_t offset cdef size_t size try: ret = (ctx).transform(slice_to_bytes(src)) offset = ret[0] size = ret[1] if (offset + size) > src.size(): msg = "offset(%i) + size(%i) is bigger than slice(%i)" raise Exception(msg % (offset, size, src.size())) return Slice(src.data() + offset, size) except BaseException as error: tb = traceback.format_exc() logger.Log(log, "Error in slice transform callback: %s", tb) error_msg.assign(str(error)) cdef cpp_bool slice_in_domain_callback( void* ctx, logger.Logger* log, string& error_msg, const Slice& src) with gil: try: return (ctx).in_domain(slice_to_bytes(src)) except BaseException as error: tb = traceback.format_exc() logger.Log(log, "Error in slice transform callback: %s", tb) error_msg.assign(str(error)) cdef cpp_bool slice_in_range_callback( void* ctx, logger.Logger* log, string& error_msg, const Slice& src) with gil: try: return (ctx).in_range(slice_to_bytes(src)) except BaseException as error: tb = traceback.format_exc() logger.Log(log, "Error in slice transform callback: %s", tb) error_msg.assign(str(error)) ########################################### ## Here are the TableFactories @cython.internal cdef class PyTableFactory(object): cdef shared_ptr[table_factory.TableFactory] factory cdef shared_ptr[table_factory.TableFactory] get_table_factory(self): return self.factory cdef set_info_log(self, shared_ptr[logger.Logger] info_log): pass cdef class BlockBasedTableFactory(PyTableFactory): cdef PyFilterPolicy py_filter_policy def __init__(self, index_type='binary_search', py_bool hash_index_allow_collision=True, checksum='crc32', PyCache block_cache=None, PyCache block_cache_compressed=None, filter_policy=None, no_block_cache=False, block_size=None, block_size_deviation=None, block_restart_interval=None, whole_key_filtering=None, enable_index_compression=False, cache_index_and_filter_blocks=False, format_version=2, ): cdef table_factory.BlockBasedTableOptions table_options if index_type == 'binary_search': table_options.index_type = table_factory.kBinarySearch elif index_type == 'hash_search': table_options.index_type = table_factory.kHashSearch else: raise ValueError("Unknown index_type: %s" % index_type) if hash_index_allow_collision: table_options.hash_index_allow_collision = True else: 
table_options.hash_index_allow_collision = False if enable_index_compression: table_options.enable_index_compression = True else: table_options.enable_index_compression = False if checksum == 'crc32': table_options.checksum = table_factory.kCRC32c elif checksum == 'xxhash': table_options.checksum = table_factory.kxxHash else: raise ValueError("Unknown checksum: %s" % checksum) if no_block_cache: table_options.no_block_cache = True else: table_options.no_block_cache = False # If the following options are None use the rocksdb default. if block_size is not None: table_options.block_size = block_size if block_size_deviation is not None: table_options.block_size_deviation = block_size_deviation if block_restart_interval is not None: table_options.block_restart_interval = block_restart_interval if whole_key_filtering is not None: if whole_key_filtering: table_options.whole_key_filtering = True else: table_options.whole_key_filtering = False if cache_index_and_filter_blocks is not None: if cache_index_and_filter_blocks: table_options.cache_index_and_filter_blocks = True else: table_options.cache_index_and_filter_blocks = False if block_cache is not None: table_options.block_cache = block_cache.get_cache() if block_cache_compressed is not None: table_options.block_cache_compressed = block_cache_compressed.get_cache() if format_version is not None: table_options.format_version = format_version # Set the filter_policy self.py_filter_policy = None if filter_policy is not None: if isinstance(filter_policy, PyFilterPolicy): if (filter_policy).get_policy().get() == NULL: raise Exception("Cannot set filter policy: %s" % filter_policy) self.py_filter_policy = filter_policy else: self.py_filter_policy = PyGenericFilterPolicy(filter_policy) table_options.filter_policy = self.py_filter_policy.get_policy() self.factory.reset(table_factory.NewBlockBasedTableFactory(table_options)) cdef set_info_log(self, shared_ptr[logger.Logger] info_log): if self.py_filter_policy is not None: self.py_filter_policy.set_info_log(info_log) cdef class PlainTableFactory(PyTableFactory): def __init__( self, user_key_len=0, bloom_bits_per_key=10, hash_table_ratio=0.75, index_sparseness=10, huge_page_tlb_size=0, encoding_type='plain', py_bool full_scan_mode=False): cdef table_factory.PlainTableOptions table_options table_options.user_key_len = user_key_len table_options.bloom_bits_per_key = bloom_bits_per_key table_options.hash_table_ratio = hash_table_ratio table_options.index_sparseness = index_sparseness table_options.huge_page_tlb_size = huge_page_tlb_size if encoding_type == 'plain': table_options.encoding_type = table_factory.kPlain elif encoding_type == 'prefix': table_options.encoding_type = table_factory.kPrefix else: raise ValueError("Unknown encoding_type: %s" % encoding_type) table_options.full_scan_mode = full_scan_mode self.factory.reset( table_factory.NewPlainTableFactory(table_options)) ############################################# ### Here are the MemtableFactories @cython.internal cdef class PyMemtableFactory(object): cdef shared_ptr[memtablerep.MemTableRepFactory] factory cdef shared_ptr[memtablerep.MemTableRepFactory] get_memtable_factory(self): return self.factory cdef class SkipListMemtableFactory(PyMemtableFactory): def __init__(self): self.factory.reset(memtablerep.NewSkipListFactory()) cdef class VectorMemtableFactory(PyMemtableFactory): def __init__(self, count=0): self.factory.reset(memtablerep.NewVectorRepFactory(count)) cdef class HashSkipListMemtableFactory(PyMemtableFactory): def __init__( self, 
            bucket_count=1000000,
            skiplist_height=4,
            skiplist_branching_factor=4):

        self.factory.reset(
            memtablerep.NewHashSkipListRepFactory(
                bucket_count,
                skiplist_height,
                skiplist_branching_factor))

cdef class HashLinkListMemtableFactory(PyMemtableFactory):
    def __init__(self, bucket_count=50000):
        self.factory.reset(memtablerep.NewHashLinkListRepFactory(bucket_count))

##################################

cdef class CompressionType(object):
    no_compression = u'no_compression'
    snappy_compression = u'snappy_compression'
    zlib_compression = u'zlib_compression'
    bzip2_compression = u'bzip2_compression'
    lz4_compression = u'lz4_compression'
    lz4hc_compression = u'lz4hc_compression'
    xpress_compression = u'xpress_compression'
    zstd_compression = u'zstd_compression'
    zstdnotfinal_compression = u'zstdnotfinal_compression'
    disable_compression = u'disable_compression'

cdef class CompactionPri(object):
    by_compensated_size = u'by_compensated_size'
    oldest_largest_seq_first = u'oldest_largest_seq_first'
    oldest_smallest_seq_first = u'oldest_smallest_seq_first'
    min_overlapping_ratio = u'min_overlapping_ratio'

@cython.internal
cdef class _ColumnFamilyHandle:
    """ This is an internal class that we will weakref for safety """
    cdef db.ColumnFamilyHandle* handle
    cdef object __weakref__
    cdef object weak_handle

    def __cinit__(self):
        self.handle = NULL

    def __dealloc__(self):
        if not self.handle == NULL:
            del self.handle

    @staticmethod
    cdef from_handle_ptr(db.ColumnFamilyHandle* handle):
        inst = <_ColumnFamilyHandle>_ColumnFamilyHandle.__new__(_ColumnFamilyHandle)
        inst.handle = handle
        return inst

    @property
    def name(self):
        return self.handle.GetName()

    @property
    def id(self):
        return self.handle.GetID()

    @property
    def weakref(self):
        if self.weak_handle is None:
            self.weak_handle = ColumnFamilyHandle.from_wrapper(self)
        return self.weak_handle

cdef class ColumnFamilyHandle:
    """ This represents a ColumnFamilyHandle """
    cdef object _ref
    cdef readonly bytes name
    cdef readonly int id

    def __cinit__(self, weakhandle):
        self._ref = weakhandle
        self.name = self._ref().name
        self.id = self._ref().id

    def __init__(self, *):
        raise TypeError("These can not be constructed from Python")

    @staticmethod
    cdef object from_wrapper(_ColumnFamilyHandle real_handle):
        return ColumnFamilyHandle.__new__(ColumnFamilyHandle, weakref.ref(real_handle))

    @property
    def is_valid(self):
        return self._ref() is not None

    def __repr__(self):
        valid = "valid" if self.is_valid else "invalid"
        return f"<ColumnFamilyHandle name: {self.name}, id: {self.id}, state: {valid}>"

    cdef db.ColumnFamilyHandle* get_handle(self) except NULL:
        cdef _ColumnFamilyHandle real_handle = self._ref()
        if real_handle is None:
            raise ValueError(f"{self} is no longer a valid ColumnFamilyHandle!")
        return real_handle.handle

    def __eq__(self, other):
        cdef ColumnFamilyHandle fast_other
        if isinstance(other, ColumnFamilyHandle):
            fast_other = other
            return (
                self.name == fast_other.name
                and self.id == fast_other.id
                and self._ref == fast_other._ref
            )
        return False

    def __lt__(self, other):
        cdef ColumnFamilyHandle fast_other
        if isinstance(other, ColumnFamilyHandle):
            return self.id < other.id
        return NotImplemented

    # Since @total_ordering isn't a thing for cython
    def __ne__(self, other):
        return not self == other

    def __gt__(self, other):
        return other < self

    def __le__(self, other):
        return not other < self

    def __ge__(self, other):
        return not self < other

    def __hash__(self):
        # hash of a weakref matches that of its original ref'ed object
        # so we use the id of our weakref object here to prevent
        # a situation where we are invalid, but match a valid handle's hash
        return hash((self.id, self.name, id(self._ref)))
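# Illustrative usage sketch (added commentary, not part of the original
# module): how the two handle classes above surface through the public API.
# The database path and column-family name are hypothetical; the calls
# (DB, create_column_family, get_column_family, (handle, key) tuples) refer
# to the DB class defined later in this module.
#
#   import rocksdb
#   db = rocksdb.DB('example.db', rocksdb.Options(create_if_missing=True))
#   logs_cf = db.create_column_family(b'logs', rocksdb.ColumnFamilyOptions())
#   db.put((logs_cf, b'k'), b'v')    # a (handle, key) tuple selects the column family
#   db.get((logs_cf, b'k'))          # -> b'v'
#   logs_cf.is_valid                 # handles wrap weakrefs; this turns False
#                                    # after drop_column_family() or db.close()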
cdef class ColumnFamilyOptions(object): cdef options.ColumnFamilyOptions* copts cdef PyComparator py_comparator cdef PyMergeOperator py_merge_operator cdef PySliceTransform py_prefix_extractor cdef PyTableFactory py_table_factory cdef PyMemtableFactory py_memtable_factory # Used to protect sharing of Options with many DB-objects cdef cpp_bool in_use def __cinit__(self): self.copts = NULL self.copts = new options.ColumnFamilyOptions() self.in_use = False def __dealloc__(self): if not self.copts == NULL: del self.copts def __init__(self, **kwargs): self.py_comparator = BytewiseComparator() self.py_merge_operator = None self.py_prefix_extractor = None self.py_table_factory = None self.py_memtable_factory = None for key, value in kwargs.items(): setattr(self, key, value) property write_buffer_size: def __get__(self): return self.copts.write_buffer_size def __set__(self, value): self.copts.write_buffer_size = value property max_write_buffer_number: def __get__(self): return self.copts.max_write_buffer_number def __set__(self, value): self.copts.max_write_buffer_number = value property min_write_buffer_number_to_merge: def __get__(self): return self.copts.min_write_buffer_number_to_merge def __set__(self, value): self.copts.min_write_buffer_number_to_merge = value property compression_opts: def __get__(self): cdef dict ret_ob = {} ret_ob['window_bits'] = self.copts.compression_opts.window_bits ret_ob['level'] = self.copts.compression_opts.level ret_ob['strategy'] = self.copts.compression_opts.strategy ret_ob['max_dict_bytes'] = self.copts.compression_opts.max_dict_bytes return ret_ob def __set__(self, dict value): cdef options.CompressionOptions* copts copts = cython.address(self.copts.compression_opts) # CompressionOptions(int wbits, int _lev, int _strategy, int _max_dict_bytes) if 'window_bits' in value: copts.window_bits = value['window_bits'] if 'level' in value: copts.level = value['level'] if 'strategy' in value: copts.strategy = value['strategy'] if 'max_dict_bytes' in value: copts.max_dict_bytes = value['max_dict_bytes'] property compaction_pri: def __get__(self): if self.copts.compaction_pri == options.kByCompensatedSize: return CompactionPri.by_compensated_size if self.copts.compaction_pri == options.kOldestLargestSeqFirst: return CompactionPri.oldest_largest_seq_first if self.copts.compaction_pri == options.kOldestSmallestSeqFirst: return CompactionPri.oldest_smallest_seq_first if self.copts.compaction_pri == options.kMinOverlappingRatio: return CompactionPri.min_overlapping_ratio def __set__(self, value): if value == CompactionPri.by_compensated_size: self.copts.compaction_pri = options.kByCompensatedSize elif value == CompactionPri.oldest_largest_seq_first: self.copts.compaction_pri = options.kOldestLargestSeqFirst elif value == CompactionPri.oldest_smallest_seq_first: self.copts.compaction_pri = options.kOldestSmallestSeqFirst elif value == CompactionPri.min_overlapping_ratio: self.copts.compaction_pri = options.kMinOverlappingRatio else: raise TypeError("Unknown compaction pri: %s" % value) property compression: def __get__(self): if self.copts.compression == options.kNoCompression: return CompressionType.no_compression elif self.copts.compression == options.kSnappyCompression: return CompressionType.snappy_compression elif self.copts.compression == options.kZlibCompression: return CompressionType.zlib_compression elif self.copts.compression == options.kBZip2Compression: return CompressionType.bzip2_compression elif self.copts.compression == options.kLZ4Compression: return 
CompressionType.lz4_compression elif self.copts.compression == options.kLZ4HCCompression: return CompressionType.lz4hc_compression elif self.copts.compression == options.kXpressCompression: return CompressionType.xpress_compression elif self.copts.compression == options.kZSTD: return CompressionType.zstd_compression elif self.copts.compression == options.kZSTDNotFinalCompression: return CompressionType.zstdnotfinal_compression elif self.copts.compression == options.kDisableCompressionOption: return CompressionType.disable_compression else: raise Exception("Unknown type: %s" % self.opts.compression) def __set__(self, value): if value == CompressionType.no_compression: self.copts.compression = options.kNoCompression elif value == CompressionType.snappy_compression: self.copts.compression = options.kSnappyCompression elif value == CompressionType.zlib_compression: self.copts.compression = options.kZlibCompression elif value == CompressionType.bzip2_compression: self.copts.compression = options.kBZip2Compression elif value == CompressionType.lz4_compression: self.copts.compression = options.kLZ4Compression elif value == CompressionType.lz4hc_compression: self.copts.compression = options.kLZ4HCCompression elif value == CompressionType.zstd_compression: self.copts.compression = options.kZSTD elif value == CompressionType.zstdnotfinal_compression: self.copts.compression = options.kZSTDNotFinalCompression elif value == CompressionType.disable_compression: self.copts.compression = options.kDisableCompressionOption else: raise TypeError("Unknown compression: %s" % value) property max_compaction_bytes: def __get__(self): return self.copts.max_compaction_bytes def __set__(self, value): self.copts.max_compaction_bytes = value property num_levels: def __get__(self): return self.copts.num_levels def __set__(self, value): self.copts.num_levels = value property level0_file_num_compaction_trigger: def __get__(self): return self.copts.level0_file_num_compaction_trigger def __set__(self, value): self.copts.level0_file_num_compaction_trigger = value property level0_slowdown_writes_trigger: def __get__(self): return self.copts.level0_slowdown_writes_trigger def __set__(self, value): self.copts.level0_slowdown_writes_trigger = value property level0_stop_writes_trigger: def __get__(self): return self.copts.level0_stop_writes_trigger def __set__(self, value): self.copts.level0_stop_writes_trigger = value property max_mem_compaction_level: def __get__(self): return self.copts.max_mem_compaction_level def __set__(self, value): self.copts.max_mem_compaction_level = value property target_file_size_base: def __get__(self): return self.copts.target_file_size_base def __set__(self, value): self.copts.target_file_size_base = value property target_file_size_multiplier: def __get__(self): return self.copts.target_file_size_multiplier def __set__(self, value): self.copts.target_file_size_multiplier = value property max_bytes_for_level_base: def __get__(self): return self.copts.max_bytes_for_level_base def __set__(self, value): self.copts.max_bytes_for_level_base = value property max_bytes_for_level_multiplier: def __get__(self): return self.copts.max_bytes_for_level_multiplier def __set__(self, value): self.copts.max_bytes_for_level_multiplier = value property max_bytes_for_level_multiplier_additional: def __get__(self): return self.copts.max_bytes_for_level_multiplier_additional def __set__(self, value): self.copts.max_bytes_for_level_multiplier_additional = value property soft_rate_limit: def __get__(self): return 
self.copts.soft_rate_limit def __set__(self, value): self.copts.soft_rate_limit = value property hard_rate_limit: def __get__(self): return self.copts.hard_rate_limit def __set__(self, value): self.copts.hard_rate_limit = value property rate_limit_delay_max_milliseconds: def __get__(self): return self.copts.rate_limit_delay_max_milliseconds def __set__(self, value): self.copts.rate_limit_delay_max_milliseconds = value property arena_block_size: def __get__(self): return self.copts.arena_block_size def __set__(self, value): self.copts.arena_block_size = value property disable_auto_compactions: def __get__(self): return self.copts.disable_auto_compactions def __set__(self, value): self.copts.disable_auto_compactions = value property purge_redundant_kvs_while_flush: def __get__(self): return self.copts.purge_redundant_kvs_while_flush def __set__(self, value): self.copts.purge_redundant_kvs_while_flush = value # FIXME: remove to util/options_helper.h # property allow_os_buffer: # def __get__(self): # return self.copts.allow_os_buffer # def __set__(self, value): # self.copts.allow_os_buffer = value property compaction_style: def __get__(self): if self.copts.compaction_style == kCompactionStyleLevel: return 'level' if self.copts.compaction_style == kCompactionStyleUniversal: return 'universal' if self.copts.compaction_style == kCompactionStyleFIFO: return 'fifo' if self.copts.compaction_style == kCompactionStyleNone: return 'none' raise Exception("Unknown compaction_style") def __set__(self, str value): if value == 'level': self.copts.compaction_style = kCompactionStyleLevel elif value == 'universal': self.copts.compaction_style = kCompactionStyleUniversal elif value == 'fifo': self.copts.compaction_style = kCompactionStyleFIFO elif value == 'none': self.copts.compaction_style = kCompactionStyleNone else: raise Exception("Unknown compaction style") property compaction_options_universal: def __get__(self): cdef universal_compaction.CompactionOptionsUniversal uopts cdef dict ret_ob = {} uopts = self.copts.compaction_options_universal ret_ob['size_ratio'] = uopts.size_ratio ret_ob['min_merge_width'] = uopts.min_merge_width ret_ob['max_merge_width'] = uopts.max_merge_width ret_ob['max_size_amplification_percent'] = uopts.max_size_amplification_percent ret_ob['compression_size_percent'] = uopts.compression_size_percent if uopts.stop_style == kCompactionStopStyleSimilarSize: ret_ob['stop_style'] = 'similar_size' elif uopts.stop_style == kCompactionStopStyleTotalSize: ret_ob['stop_style'] = 'total_size' else: raise Exception("Unknown compaction style") return ret_ob def __set__(self, dict value): cdef universal_compaction.CompactionOptionsUniversal* uopts uopts = cython.address(self.copts.compaction_options_universal) if 'size_ratio' in value: uopts.size_ratio = value['size_ratio'] if 'min_merge_width' in value: uopts.min_merge_width = value['min_merge_width'] if 'max_merge_width' in value: uopts.max_merge_width = value['max_merge_width'] if 'max_size_amplification_percent' in value: uopts.max_size_amplification_percent = value['max_size_amplification_percent'] if 'compression_size_percent' in value: uopts.compression_size_percent = value['compression_size_percent'] if 'stop_style' in value: if value['stop_style'] == 'similar_size': uopts.stop_style = kCompactionStopStyleSimilarSize elif value['stop_style'] == 'total_size': uopts.stop_style = kCompactionStopStyleTotalSize else: raise Exception("Unknown compaction style") # Deprecate # property filter_deletes: # def __get__(self): # return 
self.copts.filter_deletes # def __set__(self, value): # self.copts.filter_deletes = value property max_sequential_skip_in_iterations: def __get__(self): return self.copts.max_sequential_skip_in_iterations def __set__(self, value): self.copts.max_sequential_skip_in_iterations = value property inplace_update_support: def __get__(self): return self.copts.inplace_update_support def __set__(self, value): self.copts.inplace_update_support = value property table_factory: def __get__(self): return self.py_table_factory def __set__(self, PyTableFactory value): self.py_table_factory = value self.copts.table_factory = value.get_table_factory() property memtable_factory: def __get__(self): return self.py_memtable_factory def __set__(self, PyMemtableFactory value): self.py_memtable_factory = value self.copts.memtable_factory = value.get_memtable_factory() property inplace_update_num_locks: def __get__(self): return self.copts.inplace_update_num_locks def __set__(self, value): self.copts.inplace_update_num_locks = value property comparator: def __get__(self): return self.py_comparator.get_ob() def __set__(self, value): if isinstance(value, PyComparator): if (value).get_comparator() == NULL: raise Exception("Cannot set %s as comparator" % value) else: self.py_comparator = value else: self.py_comparator = PyGenericComparator(value) self.copts.comparator = self.py_comparator.get_comparator() property merge_operator: def __get__(self): if self.py_merge_operator is None: return None return self.py_merge_operator.get_ob() def __set__(self, value): self.py_merge_operator = PyMergeOperator(value) self.copts.merge_operator = self.py_merge_operator.get_operator() property prefix_extractor: def __get__(self): if self.py_prefix_extractor is None: return None return self.py_prefix_extractor.get_ob() def __set__(self, value): self.py_prefix_extractor = PySliceTransform(value) self.copts.prefix_extractor = self.py_prefix_extractor.get_transformer() property optimize_filters_for_hits: def __get__(self): return self.copts.optimize_filters_for_hits def __set__(self, value): self.copts.optimize_filters_for_hits = value property paranoid_file_checks: def __get__(self): return self.copts.paranoid_file_checks def __set__(self, value): self.copts.paranoid_file_checks = value cdef class Options(ColumnFamilyOptions): cdef options.Options* opts cdef PyCache py_row_cache def __cinit__(self): # Destroy the existing ColumnFamilyOptions() del self.copts self.opts = NULL self.copts = self.opts = new options.Options() self.in_use = False def __dealloc__(self): if not self.opts == NULL: self.copts = NULL del self.opts def __init__(self, **kwargs): ColumnFamilyOptions.__init__(self) self.py_row_cache = None for key, value in kwargs.items(): setattr(self, key, value) def IncreaseParallelism(self, int total_threads=16): self.opts.IncreaseParallelism(total_threads) property create_if_missing: def __get__(self): return self.opts.create_if_missing def __set__(self, value): self.opts.create_if_missing = value property create_missing_column_families: def __get__(self): return self.opts.create_missing_column_families def __set__(self, value): self.opts.create_missing_column_families = value property error_if_exists: def __get__(self): return self.opts.error_if_exists def __set__(self, value): self.opts.error_if_exists = value property paranoid_checks: def __get__(self): return self.opts.paranoid_checks def __set__(self, value): self.opts.paranoid_checks = value property max_open_files: def __get__(self): return self.opts.max_open_files def 
__set__(self, value): self.opts.max_open_files = value property use_fsync: def __get__(self): return self.opts.use_fsync def __set__(self, value): self.opts.use_fsync = value property db_log_dir: def __get__(self): return string_to_path(self.opts.db_log_dir) def __set__(self, value): self.opts.db_log_dir = path_to_string(value) property wal_dir: def __get__(self): return string_to_path(self.opts.wal_dir) def __set__(self, value): self.opts.wal_dir = path_to_string(value) property delete_obsolete_files_period_micros: def __get__(self): return self.opts.delete_obsolete_files_period_micros def __set__(self, value): self.opts.delete_obsolete_files_period_micros = value property max_background_compactions: def __get__(self): return self.opts.max_background_compactions def __set__(self, value): self.opts.max_background_compactions = value property stats_history_buffer_size: def __get__(self): return self.opts.stats_history_buffer_size def __set__(self, value): self.opts.stats_history_buffer_size = value property max_background_jobs: def __get__(self): return self.opts.max_background_jobs def __set__(self, value): self.opts.max_background_jobs = value property max_background_flushes: def __get__(self): return self.opts.max_background_flushes def __set__(self, value): self.opts.max_background_flushes = value property max_log_file_size: def __get__(self): return self.opts.max_log_file_size def __set__(self, value): self.opts.max_log_file_size = value property log_file_time_to_roll: def __get__(self): return self.opts.log_file_time_to_roll def __set__(self, value): self.opts.log_file_time_to_roll = value property keep_log_file_num: def __get__(self): return self.opts.keep_log_file_num def __set__(self, value): self.opts.keep_log_file_num = value property max_manifest_file_size: def __get__(self): return self.opts.max_manifest_file_size def __set__(self, value): self.opts.max_manifest_file_size = value property table_cache_numshardbits: def __get__(self): return self.opts.table_cache_numshardbits def __set__(self, value): self.opts.table_cache_numshardbits = value property wal_ttl_seconds: def __get__(self): return self.opts.WAL_ttl_seconds def __set__(self, value): self.opts.WAL_ttl_seconds = value property wal_size_limit_mb: def __get__(self): return self.opts.WAL_size_limit_MB def __set__(self, value): self.opts.WAL_size_limit_MB = value property manifest_preallocation_size: def __get__(self): return self.opts.manifest_preallocation_size def __set__(self, value): self.opts.manifest_preallocation_size = value property enable_write_thread_adaptive_yield: def __get__(self): return self.opts.enable_write_thread_adaptive_yield def __set__(self, value): self.opts.enable_write_thread_adaptive_yield = value property allow_concurrent_memtable_write: def __get__(self): return self.opts.allow_concurrent_memtable_write def __set__(self, value): self.opts.allow_concurrent_memtable_write = value property allow_mmap_reads: def __get__(self): return self.opts.allow_mmap_reads def __set__(self, value): self.opts.allow_mmap_reads = value property allow_mmap_writes: def __get__(self): return self.opts.allow_mmap_writes def __set__(self, value): self.opts.allow_mmap_writes = value property is_fd_close_on_exec: def __get__(self): return self.opts.is_fd_close_on_exec def __set__(self, value): self.opts.is_fd_close_on_exec = value property skip_log_error_on_recovery: def __get__(self): return self.opts.skip_log_error_on_recovery def __set__(self, value): self.opts.skip_log_error_on_recovery = value property 
stats_dump_period_sec: def __get__(self): return self.opts.stats_dump_period_sec def __set__(self, value): self.opts.stats_dump_period_sec = value property advise_random_on_open: def __get__(self): return self.opts.advise_random_on_open def __set__(self, value): self.opts.advise_random_on_open = value # TODO: need to remove -Wconversion to make this work # property access_hint_on_compaction_start: # def __get__(self): # return self.opts.access_hint_on_compaction_start # def __set__(self, AccessHint value): # self.opts.access_hint_on_compaction_start = value property use_adaptive_mutex: def __get__(self): return self.opts.use_adaptive_mutex def __set__(self, value): self.opts.use_adaptive_mutex = value property bytes_per_sync: def __get__(self): return self.opts.bytes_per_sync def __set__(self, value): self.opts.bytes_per_sync = value property row_cache: def __get__(self): return self.py_row_cache def __set__(self, value): if value is None: self.py_row_cache = None self.opts.row_cache.reset() elif not isinstance(value, PyCache): raise Exception("row_cache must be a Cache object") else: self.py_row_cache = value self.opts.row_cache = self.py_row_cache.get_cache() # Forward declaration cdef class Snapshot cdef class KeysIterator cdef class ValuesIterator cdef class ItemsIterator cdef class ReversedIterator # Forward declaration cdef class WriteBatchIterator cdef class WriteBatch(object): cdef db.WriteBatch* batch def __cinit__(self, data=None): self.batch = NULL if data is not None: self.batch = new db.WriteBatch(bytes_to_string(data)) else: self.batch = new db.WriteBatch() def __dealloc__(self): if not self.batch == NULL: del self.batch def put(self, key, value): cdef db.ColumnFamilyHandle* cf_handle = NULL if isinstance(key, tuple): column_family, key = key cf_handle = (column_family).get_handle() # nullptr is default family self.batch.Put(cf_handle, bytes_to_slice(key), bytes_to_slice(value)) def merge(self, key, value): cdef db.ColumnFamilyHandle* cf_handle = NULL if isinstance(key, tuple): column_family, key = key cf_handle = (column_family).get_handle() # nullptr is default family self.batch.Merge(cf_handle, bytes_to_slice(key), bytes_to_slice(value)) def delete(self, key): cdef db.ColumnFamilyHandle* cf_handle = NULL if isinstance(key, tuple): column_family, key = key cf_handle = (column_family).get_handle() # nullptr is default family self.batch.Delete(cf_handle, bytes_to_slice(key)) def clear(self): self.batch.Clear() def data(self): return string_to_bytes(self.batch.Data()) def count(self): return self.batch.Count() def __iter__(self): return WriteBatchIterator(self) @cython.internal cdef class WriteBatchIterator(object): # Need a reference to the WriteBatch. # The BatchItems are only pointers to the memory in WriteBatch. 
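    # Illustrative sketch (added commentary, not part of the original module):
    # iterating a WriteBatch yields the (op, key, value) tuples built in
    # __next__ below, e.g.
    #
    #   batch = WriteBatch()
    #   batch.put(b'a', b'1')
    #   batch.delete(b'b')
    #   list(batch)   # -> roughly [('Put', b'a', b'1'), ('Delete', b'b', b'')]
    #
    # When a record targets a non-default column family, the key element
    # becomes a (column_family_id, key) tuple instead of the bare key.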
cdef WriteBatch batch cdef vector[db.BatchItem] items cdef size_t pos def __init__(self, WriteBatch batch): cdef Status st self.batch = batch self.pos = 0 st = db.get_batch_items(batch.batch, cython.address(self.items)) check_status(st) def __iter__(self): return self def __next__(self): if self.pos == self.items.size(): raise StopIteration() cdef str op if self.items[self.pos].op == db.BatchItemOpPut: op = "Put" elif self.items[self.pos].op == db.BatchItemOpMerge: op = "Merge" elif self.items[self.pos].op == db.BatchItemOpDelte: op = "Delete" if self.items[self.pos].column_family_id != 0: # Column Family is set ret = ( op, ( self.items[self.pos].column_family_id, slice_to_bytes(self.items[self.pos].key) ), slice_to_bytes(self.items[self.pos].value) ) else: ret = ( op, slice_to_bytes(self.items[self.pos].key), slice_to_bytes(self.items[self.pos].value) ) self.pos += 1 return ret @cython.no_gc_clear cdef class DB(object): cdef Options opts cdef db.DB* db cdef list cf_handles cdef list cf_options def __cinit__(self, db_name, Options opts, dict column_families=None, read_only=False): cdef Status st cdef string db_path cdef vector[db.ColumnFamilyDescriptor] column_family_descriptors cdef vector[db.ColumnFamilyHandle*] column_family_handles cdef bytes default_cf_name = db.kDefaultColumnFamilyName self.db = NULL self.opts = None self.cf_handles = [] self.cf_options = [] if opts.in_use: raise Exception("Options object is already used by another DB") db_path = path_to_string(db_name) if not column_families or default_cf_name not in column_families: # Always add the default column family column_family_descriptors.push_back( db.ColumnFamilyDescriptor( db.kDefaultColumnFamilyName, options.ColumnFamilyOptions(deref(opts.opts)) ) ) self.cf_options.append(None) # Since they are the same as db if column_families: for cf_name, cf_options in column_families.items(): if not isinstance(cf_name, bytes): raise TypeError( f"column family name {cf_name!r} is not of type {bytes}!" ) if not isinstance(cf_options, ColumnFamilyOptions): raise TypeError( f"column family options {cf_options!r} is not of type " f"{ColumnFamilyOptions}!" 
) if (cf_options).in_use: raise Exception( f"ColumnFamilyOptions object for {cf_name} is already " "used by another Column Family" ) (cf_options).in_use = True column_family_descriptors.push_back( db.ColumnFamilyDescriptor( cf_name, deref((cf_options).copts) ) ) self.cf_options.append(cf_options) if read_only: with nogil: st = db.DB_OpenForReadOnly_ColumnFamilies( deref(opts.opts), db_path, column_family_descriptors, &column_family_handles, &self.db, False) else: with nogil: st = db.DB_Open_ColumnFamilies( deref(opts.opts), db_path, column_family_descriptors, &column_family_handles, &self.db) check_status(st) for handle in column_family_handles: wrapper = _ColumnFamilyHandle.from_handle_ptr(handle) self.cf_handles.append(wrapper) # Inject the loggers into the python callbacks cdef shared_ptr[logger.Logger] info_log = self.db.GetOptions( self.db.DefaultColumnFamily()).info_log if opts.py_comparator is not None: opts.py_comparator.set_info_log(info_log) if opts.py_table_factory is not None: opts.py_table_factory.set_info_log(info_log) if opts.prefix_extractor is not None: opts.py_prefix_extractor.set_info_log(info_log) cdef ColumnFamilyOptions copts for idx, copts in enumerate(self.cf_options): if not copts: continue info_log = self.db.GetOptions(column_family_handles[idx]).info_log if copts.py_comparator is not None: copts.py_comparator.set_info_log(info_log) if copts.py_table_factory is not None: copts.py_table_factory.set_info_log(info_log) if copts.prefix_extractor is not None: copts.py_prefix_extractor.set_info_log(info_log) self.opts = opts self.opts.in_use = True def close(self, safe=True): cdef ColumnFamilyOptions copts cdef cpp_bool c_safe = safe cdef Status st if self.db != NULL: # We need stop backround compactions with nogil: db.CancelAllBackgroundWork(self.db, c_safe) # We have to make sure we delete the handles so rocksdb doesn't # assert when we delete the db del self.cf_handles[:] for copts in self.cf_options: if copts: copts.in_use = False del self.cf_options[:] with nogil: st = self.db.Close() self.db = NULL if self.opts is not None: self.opts.in_use = False def __dealloc__(self): self.close() @property def column_families(self): return [handle.weakref for handle in self.cf_handles] def get_column_family(self, bytes name): for handle in self.cf_handles: if handle.name == name: return handle.weakref def put(self, key, value, sync=False, disable_wal=False): cdef Status st cdef options.WriteOptions opts opts.sync = sync opts.disableWAL = disable_wal if isinstance(key, tuple): column_family, key = key else: column_family = None cdef Slice c_key = bytes_to_slice(key) cdef Slice c_value = bytes_to_slice(value) cdef db.ColumnFamilyHandle* cf_handle = self.db.DefaultColumnFamily() if column_family: cf_handle = (column_family).get_handle() with nogil: st = self.db.Put(opts, cf_handle, c_key, c_value) check_status(st) def delete(self, key, sync=False, disable_wal=False): cdef Status st cdef options.WriteOptions opts opts.sync = sync opts.disableWAL = disable_wal if isinstance(key, tuple): column_family, key = key else: column_family = None cdef Slice c_key = bytes_to_slice(key) cdef db.ColumnFamilyHandle* cf_handle = self.db.DefaultColumnFamily() if column_family: cf_handle = (column_family).get_handle() with nogil: st = self.db.Delete(opts, cf_handle, c_key) check_status(st) def merge(self, key, value, sync=False, disable_wal=False): cdef Status st cdef options.WriteOptions opts opts.sync = sync opts.disableWAL = disable_wal if isinstance(key, tuple): column_family, key = key else: 
column_family = None cdef Slice c_key = bytes_to_slice(key) cdef Slice c_value = bytes_to_slice(value) cdef db.ColumnFamilyHandle* cf_handle = self.db.DefaultColumnFamily() if column_family: cf_handle = (column_family).get_handle() with nogil: st = self.db.Merge(opts, cf_handle, c_key, c_value) check_status(st) def write(self, WriteBatch batch, sync=False, disable_wal=False): cdef Status st cdef options.WriteOptions opts opts.sync = sync opts.disableWAL = disable_wal with nogil: st = self.db.Write(opts, batch.batch) check_status(st) def get(self, key, *args, **kwargs): cdef string res cdef Status st cdef options.ReadOptions opts opts = self.build_read_opts(self.__parse_read_opts(*args, **kwargs)) if isinstance(key, tuple): column_family, key = key else: column_family = None cdef Slice c_key = bytes_to_slice(key) cdef db.ColumnFamilyHandle* cf_handle = self.db.DefaultColumnFamily() if column_family: cf_handle = (column_family).get_handle() with nogil: st = self.db.Get(opts, cf_handle, c_key, cython.address(res)) if st.ok(): return string_to_bytes(res) elif st.IsNotFound(): return None else: check_status(st) def multi_get(self, keys, *args, as_dict=True, **kwargs): if as_dict: # Remove duplicate keys keys = list(dict.fromkeys(keys)) cdef vector[string] values values.resize(len(keys)) cdef db.ColumnFamilyHandle* cf_handle cdef vector[db.ColumnFamilyHandle*] cf_handles cdef vector[Slice] c_keys for key in keys: if isinstance(key, tuple): py_handle, key = key cf_handle = (py_handle).get_handle() else: cf_handle = self.db.DefaultColumnFamily() c_keys.push_back(bytes_to_slice(key)) cf_handles.push_back(cf_handle) cdef options.ReadOptions opts opts = self.build_read_opts(self.__parse_read_opts(*args, **kwargs)) cdef vector[Status] res with nogil: res = self.db.MultiGet( opts, cf_handles, c_keys, cython.address(values)) cdef dict ret_dict = {} cdef list ret_list = [] if as_dict: for index in range(len(keys)): if res[index].ok(): ret_dict[keys[index]] = string_to_bytes(values[index]) elif res[index].IsNotFound(): ret_dict[keys[index]] = None else: check_status(res[index]) return ret_dict else: for index in range(len(keys)): if res[index].ok(): ret_list.append(string_to_bytes(values[index])) elif res[index].IsNotFound(): ret_list.append(None) else: check_status(res[index]) return ret_list def key_may_exist(self, key, fetch=False, *args, **kwargs): cdef string value cdef cpp_bool value_found cdef cpp_bool exists cdef options.ReadOptions opts cdef Slice c_key cdef db.ColumnFamilyHandle* cf_handle = self.db.DefaultColumnFamily() opts = self.build_read_opts(self.__parse_read_opts(*args, **kwargs)) if isinstance(key, tuple): column_family, key = key cf_handle = (column_family).get_handle() c_key = bytes_to_slice(key) exists = False if fetch: value_found = False with nogil: exists = self.db.KeyMayExist( opts, cf_handle, c_key, cython.address(value), cython.address(value_found)) if exists: if value_found: return (True, string_to_bytes(value)) else: return (True, None) else: return (False, None) else: with nogil: exists = self.db.KeyMayExist( opts, cf_handle, c_key, cython.address(value)) return (exists, None) def iterkeys(self, ColumnFamilyHandle column_family=None, *args, **kwargs): cdef options.ReadOptions opts cdef KeysIterator it cdef db.ColumnFamilyHandle* cf_handle = self.db.DefaultColumnFamily() if column_family: cf_handle = column_family.get_handle() opts = self.build_read_opts(self.__parse_read_opts(*args, **kwargs)) it = KeysIterator(self, column_family) with nogil: it.ptr = 
            self.db.NewIterator(opts, cf_handle)
        return it

    def itervalues(self, ColumnFamilyHandle column_family=None, *args, **kwargs):
        cdef options.ReadOptions opts
        cdef ValuesIterator it
        cdef db.ColumnFamilyHandle* cf_handle = self.db.DefaultColumnFamily()
        if column_family:
            cf_handle = column_family.get_handle()

        opts = self.build_read_opts(self.__parse_read_opts(*args, **kwargs))

        it = ValuesIterator(self)
        with nogil:
            it.ptr = self.db.NewIterator(opts, cf_handle)
        return it

    def iteritems(self, ColumnFamilyHandle column_family=None, *args, **kwargs):
        cdef options.ReadOptions opts
        cdef ItemsIterator it
        cdef db.ColumnFamilyHandle* cf_handle = self.db.DefaultColumnFamily()
        if column_family:
            cf_handle = column_family.get_handle()

        opts = self.build_read_opts(self.__parse_read_opts(*args, **kwargs))

        it = ItemsIterator(self, column_family)
        with nogil:
            it.ptr = self.db.NewIterator(opts, cf_handle)
        return it

    def iterskeys(self, column_families, *args, **kwargs):
        cdef vector[db.Iterator*] iters
        iters.resize(len(column_families))
        cdef options.ReadOptions opts
        cdef db.Iterator* it_ptr
        cdef KeysIterator it
        cdef db.ColumnFamilyHandle* cf_handle
        cdef vector[db.ColumnFamilyHandle*] cf_handles

        for column_family in column_families:
            cf_handle = (<ColumnFamilyHandle?>column_family).get_handle()
            cf_handles.push_back(cf_handle)

        opts = self.build_read_opts(self.__parse_read_opts(*args, **kwargs))
        with nogil:
            self.db.NewIterators(opts, cf_handles, &iters)

        cf_iter = iter(column_families)
        cdef list ret = []
        for it_ptr in iters:
            it = KeysIterator(self, next(cf_iter))
            it.ptr = it_ptr
            ret.append(it)
        return ret

    def itersvalues(self, column_families, *args, **kwargs):
        cdef vector[db.Iterator*] iters
        iters.resize(len(column_families))
        cdef options.ReadOptions opts
        cdef db.Iterator* it_ptr
        cdef ValuesIterator it
        cdef db.ColumnFamilyHandle* cf_handle
        cdef vector[db.ColumnFamilyHandle*] cf_handles

        for column_family in column_families:
            cf_handle = (<ColumnFamilyHandle?>column_family).get_handle()
            cf_handles.push_back(cf_handle)

        opts = self.build_read_opts(self.__parse_read_opts(*args, **kwargs))
        with nogil:
            self.db.NewIterators(opts, cf_handles, &iters)

        cdef list ret = []
        for it_ptr in iters:
            it = ValuesIterator(self)
            it.ptr = it_ptr
            ret.append(it)
        return ret

    def itersitems(self, column_families, *args, **kwargs):
        cdef vector[db.Iterator*] iters
        iters.resize(len(column_families))
        cdef options.ReadOptions opts
        cdef db.Iterator* it_ptr
        cdef ItemsIterator it
        cdef db.ColumnFamilyHandle* cf_handle
        cdef vector[db.ColumnFamilyHandle*] cf_handles

        for column_family in column_families:
            cf_handle = (<ColumnFamilyHandle?>column_family).get_handle()
            cf_handles.push_back(cf_handle)

        opts = self.build_read_opts(self.__parse_read_opts(*args, **kwargs))
        with nogil:
            self.db.NewIterators(opts, cf_handles, &iters)

        cf_iter = iter(column_families)
        cdef list ret = []
        for it_ptr in iters:
            it = ItemsIterator(self, next(cf_iter))
            it.ptr = it_ptr
            ret.append(it)
        return ret

    def snapshot(self):
        return Snapshot(self)

    def get_property(self, prop, ColumnFamilyHandle column_family=None):
        cdef string value
        cdef Slice c_prop = bytes_to_slice(prop)
        cdef cpp_bool ret = False
        cdef db.ColumnFamilyHandle* cf_handle = self.db.DefaultColumnFamily()
        if column_family:
            cf_handle = column_family.get_handle()

        with nogil:
            ret = self.db.GetProperty(cf_handle, c_prop, cython.address(value))

        if ret:
            return string_to_bytes(value)
        else:
            return None

    def get_live_files_metadata(self):
        cdef vector[db.LiveFileMetaData] metadata

        with nogil:
            self.db.GetLiveFilesMetaData(cython.address(metadata))

        ret = []
        for ob in metadata:
            t = {}
            t['name'] = string_to_path(ob.name)
            t['level'] = ob.level
            t['size'] = ob.size
            t['smallestkey'] = string_to_bytes(ob.smallestkey)
            t['largestkey'] = string_to_bytes(ob.largestkey)
            t['smallest_seqno'] = ob.smallest_seqno
            t['largest_seqno'] = ob.largest_seqno
            ret.append(t)

        return ret
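    # Illustrative usage sketch (added commentary, not part of the original
    # module): get_property() returns the raw bytes of a RocksDB string
    # property, or None when the name is unknown; the property names themselves
    # are defined by RocksDB, e.g.
    #
    #   db.get_property(b'rocksdb.estimate-num-keys')
    #   db.get_property(b'rocksdb.stats')
    #   db.get_live_files_metadata()   # list of dicts, one per live SST file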
    def get_column_family_meta_data(self, ColumnFamilyHandle column_family=None):
        cdef db.ColumnFamilyMetaData metadata

        cdef db.ColumnFamilyHandle* cf_handle = self.db.DefaultColumnFamily()
        if column_family:
            cf_handle = (column_family).get_handle()

        with nogil:
            self.db.GetColumnFamilyMetaData(cf_handle, cython.address(metadata))

        return {
            "size": metadata.size,
            "file_count": metadata.file_count,
        }

    def compact_range(self, begin=None, end=None, ColumnFamilyHandle column_family=None, **py_options):
        cdef options.CompactRangeOptions c_options

        c_options.change_level = py_options.get('change_level', False)
        c_options.target_level = py_options.get('target_level', -1)

        blc = py_options.get('bottommost_level_compaction', 'if_compaction_filter')
        if blc == 'skip':
            c_options.bottommost_level_compaction = options.blc_skip
        elif blc == 'if_compaction_filter':
            c_options.bottommost_level_compaction = options.blc_is_filter
        elif blc == 'force':
            c_options.bottommost_level_compaction = options.blc_force
        else:
            raise ValueError("bottommost_level_compaction is not valid")

        cdef Status st
        cdef Slice begin_val
        cdef Slice end_val
        cdef Slice* begin_ptr
        cdef Slice* end_ptr

        begin_ptr = NULL
        end_ptr = NULL

        if begin is not None:
            begin_val = bytes_to_slice(begin)
            begin_ptr = cython.address(begin_val)

        if end is not None:
            end_val = bytes_to_slice(end)
            end_ptr = cython.address(end_val)

        cdef db.ColumnFamilyHandle* cf_handle = self.db.DefaultColumnFamily()
        if column_family:
            cf_handle = (column_family).get_handle()

        st = self.db.CompactRange(c_options, cf_handle, begin_ptr, end_ptr)
        check_status(st)

    @staticmethod
    def __parse_read_opts(
        verify_checksums=False,
        fill_cache=True,
        snapshot=None,
        read_tier="all"):

        # TODO: Is this really efficient?
return locals() cdef options.ReadOptions build_read_opts(self, dict py_opts): cdef options.ReadOptions opts opts.verify_checksums = py_opts['verify_checksums'] opts.fill_cache = py_opts['fill_cache'] if py_opts['snapshot'] is not None: opts.snapshot = ((py_opts['snapshot'])).ptr if py_opts['read_tier'] == "all": opts.read_tier = options.kReadAllTier elif py_opts['read_tier'] == 'cache': opts.read_tier = options.kBlockCacheTier else: raise ValueError("Invalid read_tier") return opts property options: def __get__(self): return self.opts def create_column_family(self, bytes name, ColumnFamilyOptions copts): cdef db.ColumnFamilyHandle* cf_handle cdef Status st cdef string c_name = name for handle in self.cf_handles: if handle.name == name: raise ValueError(f"{name} is already an existing column family") if copts.in_use: raise Exception("ColumnFamilyOptions are in_use by another column family") copts.in_use = True with nogil: st = self.db.CreateColumnFamily(deref(copts.copts), c_name, &cf_handle) check_status(st) handle = _ColumnFamilyHandle.from_handle_ptr(cf_handle) self.cf_handles.append(handle) self.cf_options.append(copts) return handle.weakref def drop_column_family(self, ColumnFamilyHandle weak_handle not None): cdef db.ColumnFamilyHandle* cf_handle cdef ColumnFamilyOptions copts cdef Status st cf_handle = weak_handle.get_handle() with nogil: st = self.db.DropColumnFamily(cf_handle) check_status(st) py_handle = weak_handle._ref() index = self.cf_handles.index(py_handle) copts = self.cf_options.pop(index) del self.cf_handles[index] del py_handle if copts: copts.in_use = False def repair_db(db_name, Options opts): cdef Status st cdef string db_path db_path = path_to_string(db_name) st = db.RepairDB(db_path, deref(opts.opts)) check_status(st) def list_column_families(db_name, Options opts): cdef Status st cdef string db_path cdef vector[string] column_families db_path = path_to_string(db_name) with nogil: st = db.ListColumnFamilies(deref(opts.opts), db_path, &column_families) check_status(st) return column_families @cython.no_gc_clear @cython.internal cdef class Snapshot(object): cdef const snapshot.Snapshot* ptr cdef DB db def __cinit__(self, DB db): self.db = db self.ptr = NULL with nogil: self.ptr = db.db.GetSnapshot() def __dealloc__(self): if not self.ptr == NULL: with nogil: self.db.db.ReleaseSnapshot(self.ptr) @cython.internal cdef class BaseIterator(object): cdef iterator.Iterator* ptr cdef DB db cdef ColumnFamilyHandle handle def __cinit__(self, DB db, ColumnFamilyHandle handle = None): self.db = db self.ptr = NULL self.handle = handle def __dealloc__(self): if not self.ptr == NULL: del self.ptr def __iter__(self): return self def __next__(self): if not self.ptr.Valid(): raise StopIteration() cdef object ret = self.get_ob() with nogil: self.ptr.Next() check_status(self.ptr.status()) return ret def get(self): cdef object ret = self.get_ob() return ret def __reversed__(self): return ReversedIterator(self) cpdef seek_to_first(self): with nogil: self.ptr.SeekToFirst() check_status(self.ptr.status()) cpdef seek_to_last(self): with nogil: self.ptr.SeekToLast() check_status(self.ptr.status()) cpdef seek(self, key): cdef Slice c_key = bytes_to_slice(key) with nogil: self.ptr.Seek(c_key) check_status(self.ptr.status()) cpdef seek_for_prev(self, key): cdef Slice c_key = bytes_to_slice(key) with nogil: self.ptr.SeekForPrev(c_key) check_status(self.ptr.status()) cdef object get_ob(self): return None @cython.internal cdef class KeysIterator(BaseIterator): cdef object get_ob(self): cdef Slice 
c_key with nogil: c_key = self.ptr.key() check_status(self.ptr.status()) if self.handle: return self.handle, slice_to_bytes(c_key) return slice_to_bytes(c_key) @cython.internal cdef class ValuesIterator(BaseIterator): cdef object get_ob(self): cdef Slice c_value with nogil: c_value = self.ptr.value() check_status(self.ptr.status()) return slice_to_bytes(c_value) @cython.internal cdef class ItemsIterator(BaseIterator): cdef object get_ob(self): cdef Slice c_key cdef Slice c_value with nogil: c_key = self.ptr.key() c_value = self.ptr.value() check_status(self.ptr.status()) if self.handle: return ((self.handle, slice_to_bytes(c_key)), slice_to_bytes(c_value)) return (slice_to_bytes(c_key), slice_to_bytes(c_value)) @cython.internal cdef class ReversedIterator(object): cdef BaseIterator it def __cinit__(self, BaseIterator it): self.it = it def seek_to_first(self): self.it.seek_to_first() def seek_to_last(self): self.it.seek_to_last() def seek(self, key): self.it.seek(key) def seek_for_prev(self, key): self.it.seek_for_prev(key) def get(self): return self.it.get() def __iter__(self): return self def __reversed__(self): return self.it def __next__(self): if not self.it.ptr.Valid(): raise StopIteration() cdef object ret = self.it.get_ob() with nogil: self.it.ptr.Prev() check_status(self.it.ptr.status()) return ret cdef class BackupEngine(object): cdef backup.BackupEngine* engine def __cinit__(self, backup_dir): cdef Status st cdef string c_backup_dir self.engine = NULL c_backup_dir = path_to_string(backup_dir) st = backup.BackupEngine_Open( env.Env_Default(), backup.BackupableDBOptions(c_backup_dir), cython.address(self.engine)) check_status(st) def __dealloc__(self): if not self.engine == NULL: with nogil: del self.engine def create_backup(self, DB db, flush_before_backup=False): cdef Status st cdef cpp_bool c_flush_before_backup c_flush_before_backup = flush_before_backup with nogil: st = self.engine.CreateNewBackup(db.db, c_flush_before_backup) check_status(st) def restore_backup(self, backup_id, db_dir, wal_dir): cdef Status st cdef backup.BackupID c_backup_id cdef string c_db_dir cdef string c_wal_dir c_backup_id = backup_id c_db_dir = path_to_string(db_dir) c_wal_dir = path_to_string(wal_dir) with nogil: st = self.engine.RestoreDBFromBackup( c_backup_id, c_db_dir, c_wal_dir) check_status(st) def restore_latest_backup(self, db_dir, wal_dir): cdef Status st cdef string c_db_dir cdef string c_wal_dir c_db_dir = path_to_string(db_dir) c_wal_dir = path_to_string(wal_dir) with nogil: st = self.engine.RestoreDBFromLatestBackup(c_db_dir, c_wal_dir) check_status(st) def stop_backup(self): with nogil: self.engine.StopBackup() def purge_old_backups(self, num_backups_to_keep): cdef Status st cdef uint32_t c_num_backups_to_keep c_num_backups_to_keep = num_backups_to_keep with nogil: st = self.engine.PurgeOldBackups(c_num_backups_to_keep) check_status(st) def delete_backup(self, backup_id): cdef Status st cdef backup.BackupID c_backup_id c_backup_id = backup_id with nogil: st = self.engine.DeleteBackup(c_backup_id) check_status(st) def get_backup_info(self): cdef vector[backup.BackupInfo] backup_info with nogil: self.engine.GetBackupInfo(cython.address(backup_info)) ret = [] for ob in backup_info: t = {} t['backup_id'] = ob.backup_id t['timestamp'] = ob.timestamp t['size'] = ob.size ret.append(t) return ret python-rocksdb-0.8.0~rc3/rocksdb/backup.pxd000066400000000000000000000022331400433636700207120ustar00rootroot00000000000000from libcpp cimport bool as cpp_bool from libcpp.string cimport string from 
libcpp.vector cimport vector from libc.stdint cimport uint32_t from libc.stdint cimport int64_t from libc.stdint cimport uint64_t from .status cimport Status from .db cimport DB from .env cimport Env cdef extern from "rocksdb/utilities/backupable_db.h" namespace "rocksdb": ctypedef uint32_t BackupID cdef cppclass BackupableDBOptions: BackupableDBOptions(const string& backup_dir) cdef struct BackupInfo: BackupID backup_id int64_t timestamp uint64_t size cdef cppclass BackupEngine: Status CreateNewBackup(DB*, cpp_bool) nogil except+ Status PurgeOldBackups(uint32_t) nogil except+ Status DeleteBackup(BackupID) nogil except+ void StopBackup() nogil except+ void GetBackupInfo(vector[BackupInfo]*) nogil except+ Status RestoreDBFromBackup(BackupID, string&, string&) nogil except+ Status RestoreDBFromLatestBackup(string&, string&) nogil except+ cdef Status BackupEngine_Open "rocksdb::BackupEngine::Open"( Env*, BackupableDBOptions&, BackupEngine**) python-rocksdb-0.8.0~rc3/rocksdb/cache.pxd000066400000000000000000000003651400433636700205140ustar00rootroot00000000000000from .std_memory cimport shared_ptr cdef extern from "rocksdb/cache.h" namespace "rocksdb": cdef cppclass Cache: pass cdef extern shared_ptr[Cache] NewLRUCache(size_t) cdef extern shared_ptr[Cache] NewLRUCache(size_t, int) python-rocksdb-0.8.0~rc3/rocksdb/comparator.pxd000066400000000000000000000013051400433636700216130ustar00rootroot00000000000000from libcpp.string cimport string from .slice_ cimport Slice from .logger cimport Logger from .std_memory cimport shared_ptr cdef extern from "rocksdb/comparator.h" namespace "rocksdb": cdef cppclass Comparator: const char* Name() int Compare(const Slice&, const Slice&) const cdef extern const Comparator* BytewiseComparator() nogil except + ctypedef int (*compare_func)( void*, Logger*, string&, const Slice&, const Slice&) cdef extern from "cpp/comparator_wrapper.hpp" namespace "py_rocks": cdef cppclass ComparatorWrapper: ComparatorWrapper(string, void*, compare_func) nogil except + void set_info_log(shared_ptr[Logger]) nogil except+ python-rocksdb-0.8.0~rc3/rocksdb/cpp/000077500000000000000000000000001400433636700175125ustar00rootroot00000000000000python-rocksdb-0.8.0~rc3/rocksdb/cpp/comparator_wrapper.hpp000066400000000000000000000033771400433636700241440ustar00rootroot00000000000000#include "rocksdb/comparator.h" #include "rocksdb/env.h" #include using std::string; using rocksdb::Comparator; using rocksdb::Slice; using rocksdb::Logger; namespace py_rocks { class ComparatorWrapper: public Comparator { public: typedef int (*compare_func)( void*, Logger*, string&, const Slice&, const Slice&); ComparatorWrapper( string name, void* compare_context, compare_func compare_callback): name(name), compare_context(compare_context), compare_callback(compare_callback) {} virtual int Compare(const Slice& a, const Slice& b) const { string error_msg; int val; val = this->compare_callback( this->compare_context, this->info_log.get(), error_msg, a, b); if (error_msg.size()) { throw std::runtime_error(error_msg.c_str()); } return val; } virtual const char* Name() const { return this->name.c_str(); } virtual void FindShortestSeparator(string*, const Slice&) const {} virtual void FindShortSuccessor(string*) const {} void set_info_log(std::shared_ptr info_log) { this->info_log = info_log; } private: string name; void* compare_context; compare_func compare_callback; std::shared_ptr info_log; }; } 
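// Commentary (added, not part of the original header): ComparatorWrapper
// above, like the filter-policy and slice-transform wrappers in this
// directory, bridges a RocksDB virtual call into a Python callback.  The
// callback reports Python-side failures through the error_msg out-parameter,
// and a non-empty error_msg is turned into a std::runtime_error here; the
// info_log shared_ptr is injected later via set_info_log() so the callback
// can also write to RocksDB's logger.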
python-rocksdb-0.8.0~rc3/rocksdb/cpp/filter_policy_wrapper.hpp000066400000000000000000000050411400433636700246270ustar00rootroot00000000000000#include "rocksdb/filter_policy.h" #include "rocksdb/env.h" #include using std::string; using rocksdb::FilterPolicy; using rocksdb::Slice; using rocksdb::Logger; namespace py_rocks { class FilterPolicyWrapper: public FilterPolicy { public: typedef void (*create_filter_func)( void* ctx, Logger*, string&, const Slice* keys, int n, string* dst); typedef bool (*key_may_match_func)( void* ctx, Logger*, string&, const Slice& key, const Slice& filter); FilterPolicyWrapper( string name, void* ctx, create_filter_func create_filter_callback, key_may_match_func key_may_match_callback): name(name), ctx(ctx), create_filter_callback(create_filter_callback), key_may_match_callback(key_may_match_callback) {} virtual void CreateFilter(const Slice* keys, int n, std::string* dst) const { string error_msg; this->create_filter_callback( this->ctx, this->info_log.get(), error_msg, keys, n, dst); if (error_msg.size()) { throw std::runtime_error(error_msg.c_str()); } } virtual bool KeyMayMatch(const Slice& key, const Slice& filter) const { string error_msg; bool val; val = this->key_may_match_callback( this->ctx, this->info_log.get(), error_msg, key, filter); if (error_msg.size()) { throw std::runtime_error(error_msg.c_str()); } return val; } virtual const char* Name() const { return this->name.c_str(); } void set_info_log(std::shared_ptr info_log) { this->info_log = info_log; } private: string name; void* ctx; create_filter_func create_filter_callback; key_may_match_func key_may_match_callback; std::shared_ptr info_log; }; } python-rocksdb-0.8.0~rc3/rocksdb/cpp/memtable_factories.hpp000066400000000000000000000005541400433636700240540ustar00rootroot00000000000000#include "rocksdb/memtablerep.h" using rocksdb::MemTableRepFactory; using rocksdb::VectorRepFactory; using rocksdb::SkipListFactory; namespace py_rocks { MemTableRepFactory* NewVectorRepFactory(size_t count = 0) { return new VectorRepFactory(count); } MemTableRepFactory* NewSkipListFactory() { return new SkipListFactory(); } } python-rocksdb-0.8.0~rc3/rocksdb/cpp/merge_operator_wrapper.hpp000066400000000000000000000101011400433636700247660ustar00rootroot00000000000000#include "rocksdb/merge_operator.h" using std::string; using std::deque; using rocksdb::Slice; using rocksdb::Logger; using rocksdb::MergeOperator; using rocksdb::AssociativeMergeOperator; namespace py_rocks { class AssociativeMergeOperatorWrapper: public AssociativeMergeOperator { public: typedef bool (*merge_func)( void*, const Slice& key, const Slice* existing_value, const Slice& value, std::string* new_value, Logger* logger); AssociativeMergeOperatorWrapper( string name, void* merge_context, merge_func merge_callback): name(name), merge_context(merge_context), merge_callback(merge_callback) {} virtual bool Merge( const Slice& key, const Slice* existing_value, const Slice& value, std::string* new_value, Logger* logger) const { return this->merge_callback( this->merge_context, key, existing_value, value, new_value, logger); } virtual const char* Name() const { return this->name.c_str(); } private: string name; void* merge_context; merge_func merge_callback; }; class MergeOperatorWrapper: public MergeOperator { public: typedef bool (*full_merge_func)( void* ctx, const Slice& key, const Slice* existing_value, const deque& operand_list, string* new_value, Logger* logger); typedef bool (*partial_merge_func)( void* ctx, const Slice& key, const Slice& 
left_op, const Slice& right_op, string* new_value, Logger* logger); MergeOperatorWrapper( string name, void* full_merge_context, void* partial_merge_context, full_merge_func full_merge_callback, partial_merge_func partial_merge_callback): name(name), full_merge_context(full_merge_context), partial_merge_context(partial_merge_context), full_merge_callback(full_merge_callback), partial_merge_callback(partial_merge_callback) {} virtual bool FullMerge( const Slice& key, const Slice* existing_value, const deque& operand_list, string* new_value, Logger* logger) const { return this->full_merge_callback( this->full_merge_context, key, existing_value, operand_list, new_value, logger); } virtual bool PartialMerge ( const Slice& key, const Slice& left_operand, const Slice& right_operand, string* new_value, Logger* logger) const { return this->partial_merge_callback( this->partial_merge_context, key, left_operand, right_operand, new_value, logger); } virtual const char* Name() const { return this->name.c_str(); } private: string name; void* full_merge_context; void* partial_merge_context; full_merge_func full_merge_callback; partial_merge_func partial_merge_callback; }; } python-rocksdb-0.8.0~rc3/rocksdb/cpp/slice_transform_wrapper.hpp000066400000000000000000000060641400433636700251630ustar00rootroot00000000000000#include #include "rocksdb/slice_transform.h" #include "rocksdb/env.h" #include using std::string; using rocksdb::SliceTransform; using rocksdb::Slice; using rocksdb::Logger; namespace py_rocks { class SliceTransformWrapper: public SliceTransform { public: typedef Slice (*transform_func)( void*, Logger*, string&, const Slice&); typedef bool (*in_domain_func)( void*, Logger*, string&, const Slice&); typedef bool (*in_range_func)( void*, Logger*, string&, const Slice&); SliceTransformWrapper( string name, void* ctx, transform_func transform_callback, in_domain_func in_domain_callback, in_range_func in_range_callback): name(name), ctx(ctx), transform_callback(transform_callback), in_domain_callback(in_domain_callback), in_range_callback(in_range_callback) {} virtual const char* Name() const { return this->name.c_str(); } virtual Slice Transform(const Slice& src) const { string error_msg; Slice val; val = this->transform_callback( this->ctx, this->info_log.get(), error_msg, src); if (error_msg.size()) { throw std::runtime_error(error_msg.c_str()); } return val; } virtual bool InDomain(const Slice& src) const { string error_msg; bool val; val = this->in_domain_callback( this->ctx, this->info_log.get(), error_msg, src); if (error_msg.size()) { throw std::runtime_error(error_msg.c_str()); } return val; } virtual bool InRange(const Slice& dst) const { string error_msg; bool val; val = this->in_range_callback( this->ctx, this->info_log.get(), error_msg, dst); if (error_msg.size()) { throw std::runtime_error(error_msg.c_str()); } return val; } void set_info_log(std::shared_ptr info_log) { this->info_log = info_log; } private: string name; void* ctx; transform_func transform_callback; in_domain_func in_domain_callback; in_range_func in_range_callback; std::shared_ptr info_log; }; } python-rocksdb-0.8.0~rc3/rocksdb/cpp/utils.hpp000066400000000000000000000002211400433636700213560ustar00rootroot00000000000000#include namespace py_rocks { template const T* vector_data(std::vector& v) { return v.data(); } } python-rocksdb-0.8.0~rc3/rocksdb/cpp/write_batch_iter_helper.hpp000066400000000000000000000036711400433636700251070ustar00rootroot00000000000000#pragma once #include #include "rocksdb/write_batch.h" 
namespace py_rocks { class RecordItemsHandler: public rocksdb::WriteBatch::Handler { public: enum Optype {PutRecord, MergeRecord, DeleteRecord}; class BatchItem { public: BatchItem( const Optype& op, uint32_t column_family_id, const rocksdb::Slice& key, const rocksdb::Slice& value): op(op), column_family_id(column_family_id), key(key), value(value) {} const Optype op; uint32_t column_family_id; const rocksdb::Slice key; const rocksdb::Slice value; }; typedef std::vector BatchItems; public: /* Items is filled during iteration. */ RecordItemsHandler(BatchItems* items): items(items) {} virtual rocksdb::Status PutCF( uint32_t column_family_id, const Slice& key, const Slice& value) { this->items->emplace_back(PutRecord, column_family_id, key, value); return rocksdb::Status::OK(); } virtual rocksdb::Status MergeCF( uint32_t column_family_id, const Slice& key, const Slice& value) { this->items->emplace_back(MergeRecord, column_family_id, key, value); return rocksdb::Status::OK(); } virtual rocksdb::Status DeleteCF( uint32_t column_family_id, const Slice& key) { this->items->emplace_back(DeleteRecord, column_family_id, key, rocksdb::Slice()); return rocksdb::Status::OK(); } private: BatchItems* items; }; rocksdb::Status get_batch_items(const rocksdb::WriteBatch* batch, RecordItemsHandler::BatchItems* items) { RecordItemsHandler handler(items); return batch->Iterate(&handler); } } python-rocksdb-0.8.0~rc3/rocksdb/db.pxd000066400000000000000000000166451400433636700200460ustar00rootroot00000000000000from . cimport options from libc.stdint cimport uint64_t, uint32_t from .status cimport Status from libcpp cimport bool as cpp_bool from libcpp.string cimport string from libcpp.vector cimport vector from .slice_ cimport Slice from .snapshot cimport Snapshot from .iterator cimport Iterator cdef extern from "rocksdb/write_batch.h" namespace "rocksdb": cdef cppclass WriteBatch: WriteBatch() nogil except+ WriteBatch(string) nogil except+ void Put(const Slice&, const Slice&) nogil except+ void Put(ColumnFamilyHandle*, const Slice&, const Slice&) nogil except+ void Merge(const Slice&, const Slice&) nogil except+ void Merge(ColumnFamilyHandle*, const Slice&, const Slice&) nogil except+ void Delete(const Slice&) nogil except+ void Delete(ColumnFamilyHandle*, const Slice&) nogil except+ void PutLogData(const Slice&) nogil except+ void Clear() nogil except+ const string& Data() nogil except+ int Count() nogil except+ cdef extern from "cpp/write_batch_iter_helper.hpp" namespace "py_rocks": cdef enum BatchItemOp "RecordItemsHandler::Optype": BatchItemOpPut "py_rocks::RecordItemsHandler::Optype::PutRecord" BatchItemOpMerge "py_rocks::RecordItemsHandler::Optype::MergeRecord" BatchItemOpDelte "py_rocks::RecordItemsHandler::Optype::DeleteRecord" cdef cppclass BatchItem "py_rocks::RecordItemsHandler::BatchItem": BatchItemOp op uint32_t column_family_id Slice key Slice value Status get_batch_items(WriteBatch* batch, vector[BatchItem]* items) cdef extern from "rocksdb/db.h" namespace "rocksdb": ctypedef uint64_t SequenceNumber string kDefaultColumnFamilyName cdef struct LiveFileMetaData: string name int level uint64_t size string smallestkey string largestkey SequenceNumber smallest_seqno SequenceNumber largest_seqno # cdef struct SstFileMetaData: # uint64_t size # string name # uint64_t file_number # string db_path # string smallestkey # string largestkey # SequenceNumber smallest_seqno # SequenceNumber largest_seqno # cdef struct LevelMetaData: # int level # uint64_t size # string largestkey # LiveFileMetaData files cdef 
struct ColumnFamilyMetaData: uint64_t size uint64_t file_count # string largestkey # LevelMetaData levels cdef cppclass Range: Range(const Slice&, const Slice&) cdef cppclass DB: Status Put( const options.WriteOptions&, ColumnFamilyHandle*, const Slice&, const Slice&) nogil except+ Status Delete( const options.WriteOptions&, ColumnFamilyHandle*, const Slice&) nogil except+ Status Merge( const options.WriteOptions&, ColumnFamilyHandle*, const Slice&, const Slice&) nogil except+ Status Write( const options.WriteOptions&, WriteBatch*) nogil except+ Status Get( const options.ReadOptions&, ColumnFamilyHandle*, const Slice&, string*) nogil except+ vector[Status] MultiGet( const options.ReadOptions&, const vector[ColumnFamilyHandle*]&, const vector[Slice]&, vector[string]*) nogil except+ cpp_bool KeyMayExist( const options.ReadOptions&, ColumnFamilyHandle*, Slice&, string*, cpp_bool*) nogil except+ cpp_bool KeyMayExist( const options.ReadOptions&, ColumnFamilyHandle*, Slice&, string*) nogil except+ Iterator* NewIterator( const options.ReadOptions&, ColumnFamilyHandle*) nogil except+ void NewIterators( const options.ReadOptions&, vector[ColumnFamilyHandle*]&, vector[Iterator*]*) nogil except+ const Snapshot* GetSnapshot() nogil except+ void ReleaseSnapshot(const Snapshot*) nogil except+ cpp_bool GetProperty( ColumnFamilyHandle*, const Slice&, string*) nogil except+ void GetApproximateSizes( ColumnFamilyHandle*, const Range* int, uint64_t*) nogil except+ Status CompactRange( const options.CompactRangeOptions&, ColumnFamilyHandle*, const Slice*, const Slice*) nogil except+ Status CreateColumnFamily( const options.ColumnFamilyOptions&, const string&, ColumnFamilyHandle**) nogil except+ Status DropColumnFamily( ColumnFamilyHandle*) nogil except+ int NumberLevels(ColumnFamilyHandle*) nogil except+ int MaxMemCompactionLevel(ColumnFamilyHandle*) nogil except+ int Level0StopWriteTrigger(ColumnFamilyHandle*) nogil except+ const string& GetName() nogil except+ const options.Options& GetOptions(ColumnFamilyHandle*) nogil except+ Status Flush(const options.FlushOptions&, ColumnFamilyHandle*) nogil except+ Status DisableFileDeletions() nogil except+ Status EnableFileDeletions() nogil except+ Status Close() nogil except+ # TODO: Status GetSortedWalFiles(VectorLogPtr& files) # TODO: SequenceNumber GetLatestSequenceNumber() # TODO: Status GetUpdatesSince( # SequenceNumber seq_number, # unique_ptr[TransactionLogIterator]*) Status DeleteFile(string) nogil except+ void GetLiveFilesMetaData(vector[LiveFileMetaData]*) nogil except+ void GetColumnFamilyMetaData(ColumnFamilyHandle*, ColumnFamilyMetaData*) nogil except+ ColumnFamilyHandle* DefaultColumnFamily() cdef Status DB_Open "rocksdb::DB::Open"( const options.Options&, const string&, DB**) nogil except+ cdef Status DB_Open_ColumnFamilies "rocksdb::DB::Open"( const options.Options&, const string&, const vector[ColumnFamilyDescriptor]&, vector[ColumnFamilyHandle*]*, DB**) nogil except+ cdef Status DB_OpenForReadOnly "rocksdb::DB::OpenForReadOnly"( const options.Options&, const string&, DB**, cpp_bool) nogil except+ cdef Status DB_OpenForReadOnly_ColumnFamilies "rocksdb::DB::OpenForReadOnly"( const options.Options&, const string&, const vector[ColumnFamilyDescriptor]&, vector[ColumnFamilyHandle*]*, DB**, cpp_bool) nogil except+ cdef Status RepairDB(const string& dbname, const options.Options&) cdef Status ListColumnFamilies "rocksdb::DB::ListColumnFamilies" ( const options.Options&, const string&, vector[string]*) nogil except+ cdef cppclass ColumnFamilyHandle: const 
string& GetName() nogil except+ int GetID() nogil except+ cdef cppclass ColumnFamilyDescriptor: ColumnFamilyDescriptor() nogil except+ ColumnFamilyDescriptor( const string&, const options.ColumnFamilyOptions&) nogil except+ string name options.ColumnFamilyOptions options cdef extern from "rocksdb/convenience.h" namespace "rocksdb": void CancelAllBackgroundWork(DB*, cpp_bool) nogil except+ python-rocksdb-0.8.0~rc3/rocksdb/env.pxd000066400000000000000000000002201400433636700202270ustar00rootroot00000000000000cdef extern from "rocksdb/env.h" namespace "rocksdb": cdef cppclass Env: Env() cdef Env* Env_Default "rocksdb::Env::Default"() python-rocksdb-0.8.0~rc3/rocksdb/errors.py000066400000000000000000000004341400433636700206170ustar00rootroot00000000000000class NotFound(Exception): pass class Corruption(Exception): pass class NotSupported(Exception): pass class InvalidArgument(Exception): pass class RocksIOError(Exception): pass class MergeInProgress(Exception): pass class Incomplete(Exception): pass python-rocksdb-0.8.0~rc3/rocksdb/filter_policy.pxd000066400000000000000000000021141400433636700223070ustar00rootroot00000000000000from libcpp cimport bool as cpp_bool from libcpp.string cimport string from libc.string cimport const_char from .slice_ cimport Slice from .std_memory cimport shared_ptr from .logger cimport Logger cdef extern from "rocksdb/filter_policy.h" namespace "rocksdb": cdef cppclass FilterPolicy: void CreateFilter(const Slice*, int, string*) nogil except+ cpp_bool KeyMayMatch(const Slice&, const Slice&) nogil except+ const_char* Name() nogil except+ cdef extern const FilterPolicy* NewBloomFilterPolicy(int) nogil except+ ctypedef void (*create_filter_func)( void*, Logger*, string&, const Slice*, int, string*) ctypedef cpp_bool (*key_may_match_func)( void*, Logger*, string&, const Slice&, const Slice&) cdef extern from "cpp/filter_policy_wrapper.hpp" namespace "py_rocks": cdef cppclass FilterPolicyWrapper: FilterPolicyWrapper( string, void*, create_filter_func, key_may_match_func) nogil except+ void set_info_log(shared_ptr[Logger]) nogil except+ python-rocksdb-0.8.0~rc3/rocksdb/interfaces.py000066400000000000000000000023561400433636700214330ustar00rootroot00000000000000from abc import ABCMeta from abc import abstractmethod class Comparator: __metaclass__ = ABCMeta @abstractmethod def compare(self, a, b): pass @abstractmethod def name(self): pass class AssociativeMergeOperator: __metaclass__ = ABCMeta @abstractmethod def merge(self, key, existing_value, value): pass @abstractmethod def name(self): pass class MergeOperator: __metaclass__ = ABCMeta @abstractmethod def full_merge(self, key, existing_value, operand_list): pass @abstractmethod def partial_merge(self, key, left_operand, right_operand): pass @abstractmethod def name(self): pass class FilterPolicy: __metaclass__ = ABCMeta @abstractmethod def name(self): pass @abstractmethod def create_filter(self, keys): pass @abstractmethod def key_may_match(self, key, filter_): pass class SliceTransform: __metaclass__ = ABCMeta @abstractmethod def name(self): pass @abstractmethod def transform(self, src): pass @abstractmethod def in_domain(self, src): pass @abstractmethod def in_range(self, dst): pass python-rocksdb-0.8.0~rc3/rocksdb/iterator.pxd000066400000000000000000000010771400433636700213030ustar00rootroot00000000000000from libcpp cimport bool as cpp_bool from .slice_ cimport Slice from .status cimport Status cdef extern from "rocksdb/iterator.h" namespace "rocksdb": cdef cppclass Iterator: cpp_bool Valid() nogil except+ void 
SeekToFirst() nogil except+ void SeekToLast() nogil except+ void Seek(const Slice&) nogil except+ void Next() nogil except+ void Prev() nogil except+ void SeekForPrev(const Slice&) nogil except+ Slice key() nogil except+ Slice value() nogil except+ Status status() nogil except+ python-rocksdb-0.8.0~rc3/rocksdb/logger.pxd000066400000000000000000000002241400433636700207220ustar00rootroot00000000000000cdef extern from "rocksdb/env.h" namespace "rocksdb": cdef cppclass Logger: pass void Log(Logger*, const char*, ...) nogil except+ python-rocksdb-0.8.0~rc3/rocksdb/memtablerep.pxd000066400000000000000000000007441400433636700217470ustar00rootroot00000000000000from libc.stdint cimport int32_t cdef extern from "rocksdb/memtablerep.h" namespace "rocksdb": cdef cppclass MemTableRepFactory: MemTableRepFactory() cdef MemTableRepFactory* NewHashSkipListRepFactory(size_t, int32_t, int32_t) cdef MemTableRepFactory* NewHashLinkListRepFactory(size_t) cdef extern from "cpp/memtable_factories.hpp" namespace "py_rocks": cdef MemTableRepFactory* NewVectorRepFactory(size_t) cdef MemTableRepFactory* NewSkipListFactory() python-rocksdb-0.8.0~rc3/rocksdb/merge_operator.pxd000066400000000000000000000022661400433636700224650ustar00rootroot00000000000000from libcpp.string cimport string from libcpp cimport bool as cpp_bool from libcpp.deque cimport deque from .slice_ cimport Slice from .logger cimport Logger from .std_memory cimport shared_ptr cdef extern from "rocksdb/merge_operator.h" namespace "rocksdb": cdef cppclass MergeOperator: pass ctypedef cpp_bool (*merge_func)( void*, const Slice&, const Slice*, const Slice&, string*, Logger*) ctypedef cpp_bool (*full_merge_func)( void* ctx, const Slice& key, const Slice* existing_value, const deque[string]& operand_list, string* new_value, Logger* logger) ctypedef cpp_bool (*partial_merge_func)( void* ctx, const Slice& key, const Slice& left_op, const Slice& right_op, string* new_value, Logger* logger) cdef extern from "cpp/merge_operator_wrapper.hpp" namespace "py_rocks": cdef cppclass AssociativeMergeOperatorWrapper: AssociativeMergeOperatorWrapper(string, void*, merge_func) nogil except+ cdef cppclass MergeOperatorWrapper: MergeOperatorWrapper( string, void*, void*, full_merge_func, partial_merge_func) nogil except+ python-rocksdb-0.8.0~rc3/rocksdb/merge_operators.py000066400000000000000000000013231400433636700224760ustar00rootroot00000000000000import struct as py_struct from rocksdb.interfaces import AssociativeMergeOperator class UintAddOperator(AssociativeMergeOperator): def merge(self, key, existing_value, value): if existing_value: s = py_struct.unpack('Q', existing_value)[0] + py_struct.unpack('Q', value)[0] return (True, py_struct.pack('Q', s)) return (True, value) def name(self): return b'uint64add' class StringAppendOperator(AssociativeMergeOperator): def merge(self, key, existing_value, value): if existing_value: s = existing_value + b',' + value return (True, s) return (True, value) def name(self): return b'StringAppendOperator' python-rocksdb-0.8.0~rc3/rocksdb/options.pxd000066400000000000000000000145331400433636700211460ustar00rootroot00000000000000from libcpp cimport bool as cpp_bool from libcpp.string cimport string from libcpp.vector cimport vector from libc.stdint cimport uint64_t from libc.stdint cimport uint32_t from .std_memory cimport shared_ptr from .comparator cimport Comparator from .merge_operator cimport MergeOperator from .logger cimport Logger from .slice_ cimport Slice from .snapshot cimport Snapshot from .slice_transform cimport 
SliceTransform from .table_factory cimport TableFactory #from .statistics cimport Statistics from .memtablerep cimport MemTableRepFactory from .universal_compaction cimport CompactionOptionsUniversal from .cache cimport Cache cdef extern from "rocksdb/options.h" namespace "rocksdb": cdef cppclass CompressionOptions: int window_bits; int level; int strategy; uint32_t max_dict_bytes CompressionOptions() except + CompressionOptions(int, int, int, int) except + ctypedef enum CompactionStyle: kCompactionStyleLevel kCompactionStyleUniversal kCompactionStyleFIFO kCompactionStyleNone ctypedef enum CompressionType: kNoCompression kSnappyCompression kZlibCompression kBZip2Compression kLZ4Compression kLZ4HCCompression kXpressCompression kZSTD kZSTDNotFinalCompression kDisableCompressionOption ctypedef enum ReadTier: kReadAllTier kBlockCacheTier ctypedef enum CompactionPri: kByCompensatedSize kOldestLargestSeqFirst kOldestSmallestSeqFirst kMinOverlappingRatio # This needs to be in _rocksdb.pxd so it will export into python #cpdef enum AccessHint "rocksdb::DBOptions::AccessHint": # NONE, # NORMAL, # SEQUENTIAL, # WILLNEED cdef cppclass DBOptions: cpp_bool create_if_missing cpp_bool create_missing_column_families cpp_bool error_if_exists cpp_bool paranoid_checks # TODO: env shared_ptr[Logger] info_log int max_open_files int max_file_opening_threads #shared_ptr[Statistics] statistics cpp_bool use_fsync string db_log_dir string wal_dir uint64_t delete_obsolete_files_period_micros int max_background_jobs int max_background_compactions uint32_t max_subcompactions int max_background_flushes size_t max_log_file_size size_t log_file_time_to_roll size_t keep_log_file_num size_t recycle_log_file_num size_t stats_history_buffer_size uint64_t max_manifest_file_size int table_cache_numshardbits uint64_t WAL_ttl_seconds uint64_t WAL_size_limit_MB size_t manifest_preallocation_size cpp_bool allow_mmap_reads cpp_bool allow_mmap_writes cpp_bool use_direct_reads cpp_bool use_direct_io_for_flush_and_compaction cpp_bool allow_fallocate cpp_bool is_fd_close_on_exec cpp_bool skip_log_error_on_recovery unsigned int stats_dump_period_sec cpp_bool advise_random_on_open size_t db_write_buffer_size # AccessHint access_hint_on_compaction_start cpp_bool use_adaptive_mutex uint64_t bytes_per_sync cpp_bool allow_concurrent_memtable_write cpp_bool enable_write_thread_adaptive_yield shared_ptr[Cache] row_cache void IncreaseParallelism(int) nogil except+ cdef cppclass ColumnFamilyOptions: ColumnFamilyOptions() ColumnFamilyOptions(const Options& options) const Comparator* comparator shared_ptr[MergeOperator] merge_operator # TODO: compaction_filter # TODO: compaction_filter_factory size_t write_buffer_size int max_write_buffer_number int min_write_buffer_number_to_merge CompressionType compression CompactionPri compaction_pri # TODO: compression_per_level shared_ptr[SliceTransform] prefix_extractor int num_levels int level0_file_num_compaction_trigger int level0_slowdown_writes_trigger int level0_stop_writes_trigger int max_mem_compaction_level uint64_t target_file_size_base int target_file_size_multiplier uint64_t max_bytes_for_level_base double max_bytes_for_level_multiplier vector[int] max_bytes_for_level_multiplier_additional int expanded_compaction_factor int source_compaction_factor int max_grandparent_overlap_factor cpp_bool disableDataSync double soft_rate_limit double hard_rate_limit unsigned int rate_limit_delay_max_milliseconds size_t arena_block_size # TODO: PrepareForBulkLoad() cpp_bool disable_auto_compactions cpp_bool 
purge_redundant_kvs_while_flush cpp_bool allow_os_buffer cpp_bool verify_checksums_in_compaction CompactionStyle compaction_style CompactionOptionsUniversal compaction_options_universal cpp_bool filter_deletes uint64_t max_sequential_skip_in_iterations shared_ptr[MemTableRepFactory] memtable_factory shared_ptr[TableFactory] table_factory # TODO: table_properties_collectors cpp_bool inplace_update_support size_t inplace_update_num_locks # TODO: remove options source_compaction_factor, max_grandparent_overlap_bytes and expanded_compaction_factor from document uint64_t max_compaction_bytes CompressionOptions compression_opts cpp_bool optimize_filters_for_hits cpp_bool paranoid_file_checks cdef cppclass Options(DBOptions, ColumnFamilyOptions): pass cdef cppclass WriteOptions: cpp_bool sync cpp_bool disableWAL cdef cppclass ReadOptions: cpp_bool verify_checksums cpp_bool fill_cache const Snapshot* snapshot ReadTier read_tier cdef cppclass FlushOptions: cpp_bool wait ctypedef enum BottommostLevelCompaction: blc_skip "rocksdb::BottommostLevelCompaction::kSkip" blc_is_filter "rocksdb::BottommostLevelCompaction::kIfHaveCompactionFilter" blc_force "rocksdb::BottommostLevelCompaction::kForce" cdef cppclass CompactRangeOptions: cpp_bool change_level int target_level uint32_t target_path_id BottommostLevelCompaction bottommost_level_compaction python-rocksdb-0.8.0~rc3/rocksdb/slice_.pxd000066400000000000000000000012011400433636700206750ustar00rootroot00000000000000from libcpp.string cimport string from libcpp cimport bool as cpp_bool cdef extern from "rocksdb/slice.h" namespace "rocksdb": cdef cppclass Slice: Slice() nogil Slice(const char*, size_t) nogil Slice(const string&) nogil Slice(const char*) nogil const char* data() nogil size_t size() nogil cpp_bool empty() nogil char operator[](int) nogil void clear() nogil void remove_prefix(size_t) nogil string ToString() nogil string ToString(cpp_bool) nogil int compare(const Slice&) nogil cpp_bool starts_with(const Slice&) nogil python-rocksdb-0.8.0~rc3/rocksdb/slice_transform.pxd000066400000000000000000000016261400433636700226440ustar00rootroot00000000000000from .slice_ cimport Slice from libcpp.string cimport string from libcpp cimport bool as cpp_bool from .logger cimport Logger from .std_memory cimport shared_ptr cdef extern from "rocksdb/slice_transform.h" namespace "rocksdb": cdef cppclass SliceTransform: pass ctypedef Slice (*transform_func)( void*, Logger*, string&, const Slice&) ctypedef cpp_bool (*in_domain_func)( void*, Logger*, string&, const Slice&) ctypedef cpp_bool (*in_range_func)( void*, Logger*, string&, const Slice&) cdef extern from "cpp/slice_transform_wrapper.hpp" namespace "py_rocks": cdef cppclass SliceTransformWrapper: SliceTransformWrapper( string name, void*, transform_func, in_domain_func, in_range_func) nogil except+ void set_info_log(shared_ptr[Logger]) nogil except+ python-rocksdb-0.8.0~rc3/rocksdb/snapshot.pxd000066400000000000000000000001361400433636700213040ustar00rootroot00000000000000cdef extern from "rocksdb/db.h" namespace "rocksdb": cdef cppclass Snapshot: pass python-rocksdb-0.8.0~rc3/rocksdb/statistics.pxd000066400000000000000000000005771400433636700216500ustar00rootroot00000000000000from libc.stdint cimport uint32_t, uint8_t from .std_memory cimport shared_ptr cdef extern from "rocksdb/statistics.h" namespace "rocksdb": ctypedef enum StatsLevel: kExceptHistogramOrTimers kExceptTimers kExceptDetailedTimers kExceptTimeForMutex kAll cdef cppclass Statistics: void set_stats_level(StatsLevel) nogil except+ 
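The associative merge operators defined in ``rocksdb/merge_operators.py`` above (``UintAddOperator``, ``StringAppendOperator``) are wired into a database through ``Options.merge_operator``; the test suite further below exercises exactly this. A rough usage sketch (the ``example.db`` path is purely illustrative):

.. code-block:: python

    import struct
    import rocksdb
    from rocksdb.merge_operators import UintAddOperator

    opts = rocksdb.Options(create_if_missing=True)
    opts.merge_operator = UintAddOperator()      # associative uint64 counter

    db = rocksdb.DB('example.db', opts)          # illustrative path
    db.put(b'counter', struct.pack('Q', 1))
    db.merge(b'counter', struct.pack('Q', 41))   # resolved via UintAddOperator.merge
    assert struct.unpack('Q', db.get(b'counter'))[0] == 42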
python-rocksdb-0.8.0~rc3/rocksdb/status.pxd000066400000000000000000000010531400433636700207670ustar00rootroot00000000000000from libcpp cimport bool as cpp_bool from libcpp.string cimport string cdef extern from "rocksdb/status.h" namespace "rocksdb": cdef cppclass Status: Status() cpp_bool ok() nogil cpp_bool IsNotFound() nogil const cpp_bool IsCorruption() nogil const cpp_bool IsNotSupported() nogil const cpp_bool IsInvalidArgument() nogil const cpp_bool IsIOError() nogil const cpp_bool IsMergeInProgress() nogil const cpp_bool IsIncomplete() nogil const string ToString() nogil except+ python-rocksdb-0.8.0~rc3/rocksdb/std_memory.pxd000066400000000000000000000003751400433636700216340ustar00rootroot00000000000000cdef extern from "" namespace "std": cdef cppclass shared_ptr[T]: shared_ptr() nogil except+ shared_ptr(T*) nogil except+ void reset() nogil except+ void reset(T*) nogil except+ T* get() nogil except+ python-rocksdb-0.8.0~rc3/rocksdb/table_factory.pxd000066400000000000000000000032211400433636700222610ustar00rootroot00000000000000from libc.stdint cimport uint32_t from libcpp cimport bool as cpp_bool from .std_memory cimport shared_ptr from .cache cimport Cache from .filter_policy cimport FilterPolicy cdef extern from "rocksdb/table.h" namespace "rocksdb": cdef cppclass TableFactory: TableFactory() ctypedef enum BlockBasedTableIndexType: kBinarySearch "rocksdb::BlockBasedTableOptions::IndexType::kBinarySearch" kHashSearch "rocksdb::BlockBasedTableOptions::IndexType::kHashSearch" ctypedef enum ChecksumType: kCRC32c kxxHash cdef cppclass BlockBasedTableOptions: BlockBasedTableOptions() BlockBasedTableIndexType index_type cpp_bool hash_index_allow_collision ChecksumType checksum cpp_bool no_block_cache size_t block_size int block_size_deviation int block_restart_interval cpp_bool whole_key_filtering shared_ptr[Cache] block_cache shared_ptr[Cache] block_cache_compressed shared_ptr[FilterPolicy] filter_policy cpp_bool enable_index_compression cpp_bool cache_index_and_filter_blocks int format_version cdef TableFactory* NewBlockBasedTableFactory(const BlockBasedTableOptions&) ctypedef enum EncodingType: kPlain kPrefix cdef cppclass PlainTableOptions: uint32_t user_key_len int bloom_bits_per_key double hash_table_ratio size_t index_sparseness size_t huge_page_tlb_size EncodingType encoding_type cpp_bool full_scan_mode cpp_bool store_index_in_file cdef TableFactory* NewPlainTableFactory(const PlainTableOptions&) python-rocksdb-0.8.0~rc3/rocksdb/tests/000077500000000000000000000000001400433636700200725ustar00rootroot00000000000000python-rocksdb-0.8.0~rc3/rocksdb/tests/__init__.py000066400000000000000000000000001400433636700221710ustar00rootroot00000000000000python-rocksdb-0.8.0~rc3/rocksdb/tests/test_db.py000066400000000000000000000521411400433636700220730ustar00rootroot00000000000000import os import sys import shutil import gc import unittest import rocksdb from itertools import takewhile import struct import tempfile from rocksdb.merge_operators import UintAddOperator, StringAppendOperator def int_to_bytes(ob): return str(ob).encode('ascii') class TestHelper(unittest.TestCase): def setUp(self): self.db_loc = tempfile.mkdtemp() self.addCleanup(self._close_db) def _close_db(self): del self.db gc.collect() if os.path.exists(self.db_loc): shutil.rmtree(self.db_loc) class TestDB(TestHelper): def setUp(self): TestHelper.setUp(self) opts = rocksdb.Options(create_if_missing=True) self.db = rocksdb.DB(os.path.join(self.db_loc, "test"), opts) def test_options_used_twice(self): if sys.version_info[0] == 
3: assertRaisesRegex = self.assertRaisesRegex else: assertRaisesRegex = self.assertRaisesRegexp expected = "Options object is already used by another DB" with assertRaisesRegex(Exception, expected): rocksdb.DB(os.path.join(self.db_loc, "test2"), self.db.options) def test_unicode_path(self): name = os.path.join(self.db_loc, b'M\xc3\xbcnchen'.decode('utf8')) rocksdb.DB(name, rocksdb.Options(create_if_missing=True)) self.addCleanup(shutil.rmtree, name) self.assertTrue(os.path.isdir(name)) def test_get_none(self): self.assertIsNone(self.db.get(b'xxx')) def test_put_get(self): self.db.put(b"a", b"b") self.assertEqual(b"b", self.db.get(b"a")) def test_multi_get(self): self.db.put(b"a", b"1") self.db.put(b"b", b"2") self.db.put(b"c", b"3") ret = self.db.multi_get([b'a', b'b', b'c']) ref = {b'a': b'1', b'c': b'3', b'b': b'2'} self.assertEqual(ref, ret) def test_delete(self): self.db.put(b"a", b"b") self.assertEqual(b"b", self.db.get(b"a")) self.db.delete(b"a") self.assertIsNone(self.db.get(b"a")) def test_write_batch(self): batch = rocksdb.WriteBatch() batch.put(b"key", b"v1") batch.delete(b"key") batch.put(b"key", b"v2") batch.put(b"key", b"v3") batch.put(b"a", b"b") self.db.write(batch) ref = {b'a': b'b', b'key': b'v3'} ret = self.db.multi_get([b'key', b'a']) self.assertEqual(ref, ret) def test_write_batch_iter(self): batch = rocksdb.WriteBatch() self.assertEqual([], list(batch)) batch.put(b"key1", b"v1") batch.put(b"key2", b"v2") batch.put(b"key3", b"v3") batch.delete(b'a') batch.delete(b'key1') batch.merge(b'xxx', b'value') it = iter(batch) del batch ref = [ ('Put', b'key1', b'v1'), ('Put', b'key2', b'v2'), ('Put', b'key3', b'v3'), ('Delete', b'a', b''), ('Delete', b'key1', b''), ('Merge', b'xxx', b'value') ] self.assertEqual(ref, list(it)) def test_key_may_exists(self): self.db.put(b"a", b'1') self.assertEqual((False, None), self.db.key_may_exist(b"x")) self.assertEqual((False, None), self.db.key_may_exist(b'x', True)) self.assertEqual((True, None), self.db.key_may_exist(b'a')) self.assertEqual((True, b'1'), self.db.key_may_exist(b'a', True)) def test_seek_for_prev(self): self.db.put(b'a1', b'a1_value') self.db.put(b'a3', b'a3_value') self.db.put(b'b1', b'b1_value') self.db.put(b'b2', b'b2_value') self.db.put(b'c2', b'c2_value') self.db.put(b'c4', b'c4_value') self.assertEqual(self.db.get(b'a1'), b'a1_value') it = self.db.iterkeys() it.seek(b'a1') self.assertEqual(it.get(), b'a1') it.seek(b'a3') self.assertEqual(it.get(), b'a3') it.seek_for_prev(b'c4') self.assertEqual(it.get(), b'c4') it.seek_for_prev(b'c3') self.assertEqual(it.get(), b'c2') it = self.db.itervalues() it.seek(b'a1') self.assertEqual(it.get(), b'a1_value') it.seek(b'a3') self.assertEqual(it.get(), b'a3_value') it.seek_for_prev(b'c4') self.assertEqual(it.get(), b'c4_value') it.seek_for_prev(b'c3') self.assertEqual(it.get(), b'c2_value') it = self.db.iteritems() it.seek(b'a1') self.assertEqual(it.get(), (b'a1', b'a1_value')) it.seek(b'a3') self.assertEqual(it.get(), (b'a3', b'a3_value')) it.seek_for_prev(b'c4') self.assertEqual(it.get(), (b'c4', b'c4_value')) it.seek_for_prev(b'c3') self.assertEqual(it.get(), (b'c2', b'c2_value')) reverse_it = reversed(it) it.seek_for_prev(b'c3') self.assertEqual(it.get(), (b'c2', b'c2_value')) def test_iter_keys(self): for x in range(300): self.db.put(int_to_bytes(x), int_to_bytes(x)) it = self.db.iterkeys() self.assertEqual([], list(it)) it.seek_to_last() self.assertEqual([b'99'], list(it)) ref = sorted([int_to_bytes(x) for x in range(300)]) it.seek_to_first() self.assertEqual(ref, list(it)) 
it.seek(b'90') ref = [ b'90', b'91', b'92', b'93', b'94', b'95', b'96', b'97', b'98', b'99' ] self.assertEqual(ref, list(it)) def test_iter_values(self): for x in range(300): self.db.put(int_to_bytes(x), int_to_bytes(x * 1000)) it = self.db.itervalues() self.assertEqual([], list(it)) it.seek_to_last() self.assertEqual([b'99000'], list(it)) ref = sorted([int_to_bytes(x) for x in range(300)]) ref = [int_to_bytes(int(x) * 1000) for x in ref] it.seek_to_first() self.assertEqual(ref, list(it)) it.seek(b'90') ref = [int_to_bytes(x * 1000) for x in range(90, 100)] self.assertEqual(ref, list(it)) def test_iter_items(self): for x in range(300): self.db.put(int_to_bytes(x), int_to_bytes(x * 1000)) it = self.db.iteritems() self.assertEqual([], list(it)) it.seek_to_last() self.assertEqual([(b'99', b'99000')], list(it)) ref = sorted([int_to_bytes(x) for x in range(300)]) ref = [(x, int_to_bytes(int(x) * 1000)) for x in ref] it.seek_to_first() self.assertEqual(ref, list(it)) it.seek(b'90') ref = [(int_to_bytes(x), int_to_bytes(x * 1000)) for x in range(90, 100)] self.assertEqual(ref, list(it)) def test_reverse_iter(self): for x in range(100): self.db.put(int_to_bytes(x), int_to_bytes(x * 1000)) it = self.db.iteritems() it.seek_to_last() ref = reversed(sorted([int_to_bytes(x) for x in range(100)])) ref = [(x, int_to_bytes(int(x) * 1000)) for x in ref] self.assertEqual(ref, list(reversed(it))) def test_snapshot(self): self.db.put(b"a", b"1") self.db.put(b"b", b"2") snapshot = self.db.snapshot() self.db.put(b"a", b"2") self.db.delete(b"b") it = self.db.iteritems() it.seek_to_first() self.assertEqual({b'a': b'2'}, dict(it)) it = self.db.iteritems(snapshot=snapshot) it.seek_to_first() self.assertEqual({b'a': b'1', b'b': b'2'}, dict(it)) def test_get_property(self): for x in range(300): x = int_to_bytes(x) self.db.put(x, x) self.assertIsNotNone(self.db.get_property(b'rocksdb.stats')) self.assertIsNotNone(self.db.get_property(b'rocksdb.sstables')) self.assertIsNotNone(self.db.get_property(b'rocksdb.num-files-at-level0')) self.assertIsNone(self.db.get_property(b'does not exsits')) def test_compact_range(self): for x in range(10000): x = int_to_bytes(x) self.db.put(x, x) self.db.compact_range() class AssocCounter(rocksdb.interfaces.AssociativeMergeOperator): def merge(self, key, existing_value, value): if existing_value: return (True, int_to_bytes(int(existing_value) + int(value))) return (True, value) def name(self): return b'AssocCounter' class TestUint64Merge(TestHelper): def setUp(self): TestHelper.setUp(self) opts = rocksdb.Options() opts.create_if_missing = True opts.merge_operator = UintAddOperator() self.db = rocksdb.DB(os.path.join(self.db_loc, 'test'), opts) def test_merge(self): self.db.put(b'a', struct.pack('Q', 5566)) for x in range(1000): self.db.merge(b"a", struct.pack('Q', x)) self.assertEqual(5566 + sum(range(1000)), struct.unpack('Q', self.db.get(b'a'))[0]) # class TestPutMerge(TestHelper): # def setUp(self): # TestHelper.setUp(self) # opts = rocksdb.Options() # opts.create_if_missing = True # opts.merge_operator = "put" # self.db = rocksdb.DB(os.path.join(self.db_loc, 'test'), opts) # def test_merge(self): # self.db.put(b'a', b'ccc') # self.db.merge(b'a', b'ddd') # self.assertEqual(self.db.get(b'a'), 'ddd') # class TestPutV1Merge(TestHelper): # def setUp(self): # TestHelper.setUp(self) # opts = rocksdb.Options() # opts.create_if_missing = True # opts.merge_operator = "put_v1" # self.db = rocksdb.DB(os.path.join(self.db_loc, 'test'), opts) # def test_merge(self): # self.db.put(b'a', b'ccc') # 
self.db.merge(b'a', b'ddd') # self.assertEqual(self.db.get(b'a'), 'ddd') class TestStringAppendOperatorMerge(TestHelper): def setUp(self): TestHelper.setUp(self) opts = rocksdb.Options() opts.create_if_missing = True opts.merge_operator = StringAppendOperator() self.db = rocksdb.DB(os.path.join(self.db_loc, 'test'), opts) # NOTE(sileht): Raise "Corruption: Error: Could not perform merge." on PY3 #@unittest.skipIf(sys.version_info[0] == 3, # "Unexpected behavior on PY3") def test_merge(self): self.db.put(b'a', b'ccc') self.db.merge(b'a', b'ddd') self.assertEqual(self.db.get(b'a'), b'ccc,ddd') # class TestStringMaxOperatorMerge(TestHelper): # def setUp(self): # TestHelper.setUp(self) # opts = rocksdb.Options() # opts.create_if_missing = True # opts.merge_operator = "max" # self.db = rocksdb.DB(os.path.join(self.db_loc, 'test'), opts) # def test_merge(self): # self.db.put(b'a', int_to_bytes(55)) # self.db.merge(b'a', int_to_bytes(56)) # self.assertEqual(int(self.db.get(b'a')), 56) class TestAssocMerge(TestHelper): def setUp(self): TestHelper.setUp(self) opts = rocksdb.Options() opts.create_if_missing = True opts.merge_operator = AssocCounter() self.db = rocksdb.DB(os.path.join(self.db_loc, 'test'), opts) def test_merge(self): for x in range(1000): self.db.merge(b"a", int_to_bytes(x)) self.assertEqual(sum(range(1000)), int(self.db.get(b'a'))) class FullCounter(rocksdb.interfaces.MergeOperator): def name(self): return b'fullcounter' def full_merge(self, key, existing_value, operand_list): ret = sum([int(x) for x in operand_list]) if existing_value: ret += int(existing_value) return (True, int_to_bytes(ret)) def partial_merge(self, key, left, right): return (True, int_to_bytes(int(left) + int(right))) class TestFullMerge(TestHelper): def setUp(self): TestHelper.setUp(self) opts = rocksdb.Options() opts.create_if_missing = True opts.merge_operator = FullCounter() self.db = rocksdb.DB(os.path.join(self.db_loc, 'test'), opts) def test_merge(self): for x in range(1000): self.db.merge(b"a", int_to_bytes(x)) self.assertEqual(sum(range(1000)), int(self.db.get(b'a'))) class SimpleComparator(rocksdb.interfaces.Comparator): def name(self): return b'mycompare' def compare(self, a, b): a = int(a) b = int(b) if a < b: return -1 if a == b: return 0 if a > b: return 1 class TestComparator(TestHelper): def setUp(self): TestHelper.setUp(self) opts = rocksdb.Options() opts.create_if_missing = True opts.comparator = SimpleComparator() self.db = rocksdb.DB(os.path.join(self.db_loc, 'test'), opts) def test_compare(self): for x in range(1000): self.db.put(int_to_bytes(x), int_to_bytes(x)) self.assertEqual(b'300', self.db.get(b'300')) class StaticPrefix(rocksdb.interfaces.SliceTransform): def name(self): return b'static' def transform(self, src): return (0, 5) def in_domain(self, src): return len(src) >= 5 def in_range(self, dst): return len(dst) == 5 class TestPrefixExtractor(TestHelper): def setUp(self): TestHelper.setUp(self) opts = rocksdb.Options(create_if_missing=True) opts.prefix_extractor = StaticPrefix() self.db = rocksdb.DB(os.path.join(self.db_loc, 'test'), opts) def _fill_db(self): for x in range(3000): keyx = hex(x)[2:].zfill(5).encode('utf8') + b'.x' keyy = hex(x)[2:].zfill(5).encode('utf8') + b'.y' keyz = hex(x)[2:].zfill(5).encode('utf8') + b'.z' self.db.put(keyx, b'x') self.db.put(keyy, b'y') self.db.put(keyz, b'z') def test_prefix_iterkeys(self): self._fill_db() self.assertEqual(b'x', self.db.get(b'00001.x')) self.assertEqual(b'y', self.db.get(b'00001.y')) self.assertEqual(b'z', 
self.db.get(b'00001.z')) it = self.db.iterkeys() it.seek(b'00002') ref = [b'00002.x', b'00002.y', b'00002.z'] ret = takewhile(lambda key: key.startswith(b'00002'), it) self.assertEqual(ref, list(ret)) def test_prefix_iteritems(self): self._fill_db() it = self.db.iteritems() it.seek(b'00002') ref = {b'00002.z': b'z', b'00002.y': b'y', b'00002.x': b'x'} ret = takewhile(lambda item: item[0].startswith(b'00002'), it) self.assertEqual(ref, dict(ret)) class TestDBColumnFamilies(TestHelper): def setUp(self): TestHelper.setUp(self) opts = rocksdb.Options(create_if_missing=True) self.db = rocksdb.DB( os.path.join(self.db_loc, 'test'), opts, ) self.cf_a = self.db.create_column_family(b'A', rocksdb.ColumnFamilyOptions()) self.cf_b = self.db.create_column_family(b'B', rocksdb.ColumnFamilyOptions()) def test_column_families(self): families = self.db.column_families names = [handle.name for handle in families] self.assertEqual([b'default', b'A', b'B'], names) for name in names: self.assertIn(self.db.get_column_family(name), families) self.assertEqual( names, rocksdb.list_column_families( os.path.join(self.db_loc, 'test'), rocksdb.Options(), ) ) def test_get_none(self): self.assertIsNone(self.db.get(b'k')) self.assertIsNone(self.db.get((self.cf_a, b'k'))) self.assertIsNone(self.db.get((self.cf_b, b'k'))) def test_put_get(self): key = (self.cf_a, b'k') self.db.put(key, b"v") self.assertEqual(b"v", self.db.get(key)) self.assertIsNone(self.db.get(b"k")) self.assertIsNone(self.db.get((self.cf_b, b"k"))) def test_multi_get(self): data = [ (b'a', b'1default'), (b'b', b'2default'), (b'c', b'3default'), ((self.cf_a, b'a'), b'1a'), ((self.cf_a, b'b'), b'2a'), ((self.cf_a, b'c'), b'3a'), ((self.cf_b, b'a'), b'1b'), ((self.cf_b, b'b'), b'2b'), ((self.cf_b, b'c'), b'3b'), ] for value in data: self.db.put(*value) multi_get_lookup = [value[0] for value in data] ret = self.db.multi_get(multi_get_lookup) ref = {value[0]: value[1] for value in data} self.assertEqual(ref, ret) def test_delete(self): self.db.put((self.cf_a, b"a"), b"b") self.assertEqual(b"b", self.db.get((self.cf_a, b"a"))) self.db.delete((self.cf_a, b"a")) self.assertIsNone(self.db.get((self.cf_a, b"a"))) def test_write_batch(self): cfa = self.db.get_column_family(b"A") batch = rocksdb.WriteBatch() batch.put((cfa, b"key"), b"v1") batch.delete((self.cf_a, b"key")) batch.put((cfa, b"key"), b"v2") batch.put((cfa, b"key"), b"v3") batch.put((cfa, b"a"), b"1") batch.put((cfa, b"b"), b"2") self.db.write(batch) query = [(cfa, b"key"), (cfa, b"a"), (cfa, b"b")] ret = self.db.multi_get(query) self.assertEqual(b"v3", ret[query[0]]) self.assertEqual(b"1", ret[query[1]]) self.assertEqual(b"2", ret[query[2]]) def test_key_may_exists(self): self.db.put((self.cf_a, b"a"), b'1') self.assertEqual( (False, None), self.db.key_may_exist((self.cf_a, b"x")) ) self.assertEqual( (False, None), self.db.key_may_exist((self.cf_a, b'x'), fetch=True) ) self.assertEqual( (True, None), self.db.key_may_exist((self.cf_a, b'a')) ) self.assertEqual( (True, b'1'), self.db.key_may_exist((self.cf_a, b'a'), fetch=True) ) def test_iter_keys(self): for x in range(300): self.db.put((self.cf_a, int_to_bytes(x)), int_to_bytes(x)) it = self.db.iterkeys(self.cf_a) self.assertEqual([], list(it)) it.seek_to_last() self.assertEqual([(self.cf_a, b'99')], list(it)) ref = sorted([(self.cf_a, int_to_bytes(x)) for x in range(300)]) it.seek_to_first() self.assertEqual(ref, list(it)) it.seek(b'90') ref = sorted([(self.cf_a, int_to_bytes(x)) for x in range(90, 100)]) self.assertEqual(ref, list(it)) def 
test_iter_values(self): for x in range(300): self.db.put((self.cf_b, int_to_bytes(x)), int_to_bytes(x * 1000)) it = self.db.itervalues(self.cf_b) self.assertEqual([], list(it)) it.seek_to_last() self.assertEqual([b'99000'], list(it)) ref = sorted([int_to_bytes(x) for x in range(300)]) ref = [int_to_bytes(int(x) * 1000) for x in ref] it.seek_to_first() self.assertEqual(ref, list(it)) it.seek(b'90') ref = [int_to_bytes(x * 1000) for x in range(90, 100)] self.assertEqual(ref, list(it)) def test_iter_items(self): for x in range(300): self.db.put((self.cf_b, int_to_bytes(x)), int_to_bytes(x * 1000)) it = self.db.iteritems(self.cf_b) self.assertEqual([], list(it)) it.seek_to_last() self.assertEqual([((self.cf_b, b'99'), b'99000')], list(it)) ref = sorted([int_to_bytes(x) for x in range(300)]) ref = [((self.cf_b, x), int_to_bytes(int(x) * 1000)) for x in ref] it.seek_to_first() self.assertEqual(ref, list(it)) it.seek(b'90') ref = [((self.cf_b, int_to_bytes(x)), int_to_bytes(x * 1000)) for x in range(90, 100)] self.assertEqual(ref, list(it)) def test_reverse_iter(self): for x in range(100): self.db.put((self.cf_a, int_to_bytes(x)), int_to_bytes(x * 1000)) it = self.db.iteritems(self.cf_a) it.seek_to_last() ref = reversed(sorted([(self.cf_a, int_to_bytes(x)) for x in range(100)])) ref = [(x, int_to_bytes(int(x[1]) * 1000)) for x in ref] self.assertEqual(ref, list(reversed(it))) def test_snapshot(self): cfa = self.db.get_column_family(b'A') self.db.put((cfa, b"a"), b"1") self.db.put((cfa, b"b"), b"2") snapshot = self.db.snapshot() self.db.put((cfa, b"a"), b"2") self.db.delete((cfa, b"b")) it = self.db.iteritems(cfa) it.seek_to_first() self.assertEqual({(cfa, b'a'): b'2'}, dict(it)) it = self.db.iteritems(cfa, snapshot=snapshot) it.seek_to_first() self.assertEqual({(cfa, b'a'): b'1', (cfa, b'b'): b'2'}, dict(it)) def test_get_property(self): for x in range(300): x = int_to_bytes(x) self.db.put((self.cf_a, x), x) self.assertEqual(b"300", self.db.get_property(b'rocksdb.estimate-num-keys', self.cf_a)) self.assertIsNone(self.db.get_property(b'does not exsits', self.cf_a)) def test_compact_range(self): for x in range(10000): x = int_to_bytes(x) self.db.put((self.cf_b, x), x) self.db.compact_range(column_family=self.cf_b) python-rocksdb-0.8.0~rc3/rocksdb/tests/test_memtable.py000066400000000000000000000013721400433636700232740ustar00rootroot00000000000000# content of test_sample.py import rocksdb import pytest import shutil import os import tempfile def test_open_skiplist_memtable_factory(): opts = rocksdb.Options() opts.memtable_factory = rocksdb.SkipListMemtableFactory() opts.create_if_missing = True loc = tempfile.mkdtemp() try: test_db = rocksdb.DB(os.path.join(loc, "test"), opts) finally: shutil.rmtree(loc) def test_open_vector_memtable_factory(): opts = rocksdb.Options() opts.allow_concurrent_memtable_write = False opts.memtable_factory = rocksdb.VectorMemtableFactory() opts.create_if_missing = True loc = tempfile.mkdtemp() try: test_db = rocksdb.DB(os.path.join(loc, "test"), opts) finally: shutil.rmtree(loc) python-rocksdb-0.8.0~rc3/rocksdb/tests/test_options.py000066400000000000000000000153521400433636700232040ustar00rootroot00000000000000import unittest import sys import rocksdb class TestFilterPolicy(rocksdb.interfaces.FilterPolicy): def create_filter(self, keys): return b'nix' def key_may_match(self, key, fil): return True def name(self): return b'testfilter' class TestMergeOperator(rocksdb.interfaces.MergeOperator): def full_merge(self, *args, **kwargs): return (False, None) def 
partial_merge(self, *args, **kwargs): return (False, None) def name(self): return b'testmergeop' class TestOptions(unittest.TestCase): # def test_default_merge_operator(self): # opts = rocksdb.Options() # self.assertEqual(True, opts.paranoid_checks) # opts.paranoid_checks = False # self.assertEqual(False, opts.paranoid_checks) # self.assertIsNone(opts.merge_operator) # opts.merge_operator = "uint64add" # self.assertIsNotNone(opts.merge_operator) # self.assertEqual(opts.merge_operator, "uint64add") # with self.assertRaises(TypeError): # opts.merge_operator = "not an operator" # FIXME: travis test should include the latest version of rocksdb # def test_compaction_pri(self): # opts = rocksdb.Options() # default compaction_pri # self.assertEqual(opts.compaction_pri, rocksdb.CompactionPri.by_compensated_size) # self.assertEqual(opts.compaction_pri, rocksdb.CompactionPri.min_overlapping_ratio) # opts.compaction_pri = rocksdb.CompactionPri.by_compensated_size # self.assertEqual(opts.compaction_pri, rocksdb.CompactionPri.by_compensated_size) # opts.compaction_pri = rocksdb.CompactionPri.oldest_largest_seq_first # self.assertEqual(opts.compaction_pri, rocksdb.CompactionPri.oldest_largest_seq_first) # opts.compaction_pri = rocksdb.CompactionPri.min_overlapping_ratio # self.assertEqual(opts.compaction_pri, rocksdb.CompactionPri.min_overlapping_ratio) def test_enable_write_thread_adaptive_yield(self): opts = rocksdb.Options() self.assertEqual(opts.enable_write_thread_adaptive_yield, True) opts.enable_write_thread_adaptive_yield = False self.assertEqual(opts.enable_write_thread_adaptive_yield, False) def test_allow_concurrent_memtable_write(self): opts = rocksdb.Options() self.assertEqual(opts.allow_concurrent_memtable_write, True) opts.allow_concurrent_memtable_write = False self.assertEqual(opts.allow_concurrent_memtable_write, False) def test_compression_opts(self): opts = rocksdb.Options() compression_opts = opts.compression_opts # default value self.assertEqual(isinstance(compression_opts, dict), True) self.assertEqual(compression_opts['window_bits'], -14) # This doesn't match rocksdb latest # self.assertEqual(compression_opts['level'], -1) self.assertEqual(compression_opts['strategy'], 0) self.assertEqual(compression_opts['max_dict_bytes'], 0) with self.assertRaises(TypeError): opts.compression_opts = list(1,2) opts.compression_opts = {'window_bits': 1, 'level': 2, 'strategy': 3, 'max_dict_bytes': 4} compression_opts = opts.compression_opts self.assertEqual(compression_opts['window_bits'], 1) self.assertEqual(compression_opts['level'], 2) self.assertEqual(compression_opts['strategy'], 3) self.assertEqual(compression_opts['max_dict_bytes'], 4) def test_simple(self): opts = rocksdb.Options() self.assertEqual(True, opts.paranoid_checks) opts.paranoid_checks = False self.assertEqual(False, opts.paranoid_checks) self.assertIsNone(opts.merge_operator) ob = TestMergeOperator() opts.merge_operator = ob self.assertEqual(opts.merge_operator, ob) self.assertIsInstance( opts.comparator, rocksdb.BytewiseComparator) self.assertIn(opts.compression, (rocksdb.CompressionType.no_compression, rocksdb.CompressionType.snappy_compression)) opts.compression = rocksdb.CompressionType.zstd_compression self.assertEqual(rocksdb.CompressionType.zstd_compression, opts.compression) def test_block_options(self): rocksdb.BlockBasedTableFactory( block_size=4096, filter_policy=TestFilterPolicy(), block_cache=rocksdb.LRUCache(100)) def test_unicode_path(self): name = b'/tmp/M\xc3\xbcnchen'.decode('utf8') opts = rocksdb.Options() 
opts.db_log_dir = name opts.wal_dir = name self.assertEqual(name, opts.db_log_dir) self.assertEqual(name, opts.wal_dir) def test_table_factory(self): opts = rocksdb.Options() self.assertIsNone(opts.table_factory) opts.table_factory = rocksdb.BlockBasedTableFactory() opts.table_factory = rocksdb.PlainTableFactory() def test_compaction_style(self): opts = rocksdb.Options() self.assertEqual('level', opts.compaction_style) opts.compaction_style = 'universal' self.assertEqual('universal', opts.compaction_style) opts.compaction_style = 'level' self.assertEqual('level', opts.compaction_style) if sys.version_info[0] == 3: assertRaisesRegex = self.assertRaisesRegex else: assertRaisesRegex = self.assertRaisesRegexp with assertRaisesRegex(Exception, 'Unknown compaction style'): opts.compaction_style = 'foo' def test_compaction_opts_universal(self): opts = rocksdb.Options() uopts = opts.compaction_options_universal self.assertEqual(-1, uopts['compression_size_percent']) self.assertEqual(200, uopts['max_size_amplification_percent']) self.assertEqual('total_size', uopts['stop_style']) self.assertEqual(1, uopts['size_ratio']) self.assertEqual(2, uopts['min_merge_width']) self.assertGreaterEqual(4294967295, uopts['max_merge_width']) new_opts = {'stop_style': 'similar_size', 'max_merge_width': 30} opts.compaction_options_universal = new_opts uopts = opts.compaction_options_universal self.assertEqual(-1, uopts['compression_size_percent']) self.assertEqual(200, uopts['max_size_amplification_percent']) self.assertEqual('similar_size', uopts['stop_style']) self.assertEqual(1, uopts['size_ratio']) self.assertEqual(2, uopts['min_merge_width']) self.assertEqual(30, uopts['max_merge_width']) def test_row_cache(self): opts = rocksdb.Options() self.assertIsNone(opts.row_cache) opts.row_cache = cache = rocksdb.LRUCache(2*1024*1024) self.assertEqual(cache, opts.row_cache) python-rocksdb-0.8.0~rc3/rocksdb/universal_compaction.pxd000066400000000000000000000007741400433636700237010ustar00rootroot00000000000000cdef extern from "rocksdb/universal_compaction.h" namespace "rocksdb": ctypedef enum CompactionStopStyle: kCompactionStopStyleSimilarSize kCompactionStopStyleTotalSize cdef cppclass CompactionOptionsUniversal: CompactionOptionsUniversal() unsigned int size_ratio unsigned int min_merge_width unsigned int max_merge_width unsigned int max_size_amplification_percent int compression_size_percent CompactionStopStyle stop_style python-rocksdb-0.8.0~rc3/setup.cfg000066400000000000000000000023701400433636700171240ustar00rootroot00000000000000[metadata] name = rocksdb version = 0.8.0rc3 description = Python bindings for RocksDB long_description = file: README.rst long_description_content_type = text/x-rst author = Martina Ferrari author_email = tina@tina.pm url = https://github.com/NightTsarina/python-rocksdb project_urls = Bug Reports = https://github.com/NightTsarina/python-rocksdb/issues Repository = https://github.com/NightTsarina/python-rocksdb license = BSD 3-Clause License license_file = LICENSE keywords = rocksdb bindings classifiers = Development Status :: 3 - Alpha Intended Audience :: Developers License :: OSI Approved :: BSD License Programming Language :: Python :: 3 Programming Language :: Python :: 3.7 Programming Language :: Python :: 3.8 Programming Language :: Python :: 3.9 Topic :: Database [options] packages = find: package_dir = rocksdb = rocksdb include_package_data = True zip_safe = False setup_requires = cython >= 0.20 setuptools >= 25 install_requires = setuptools >= 25 test_require = pytest 
[options.packages.find]
where = .

[options.extras_require]
doc =
    sphinx
    sphinx_rtd_theme

[build_sphinx]
source-dir = docs
build-dir = docs/_build
all_files = 1

[upload_sphinx]
upload-dir = docs/_build/html
python-rocksdb-0.8.0~rc3/setup.py000066400000000000000000000015131400433636700170130ustar00rootroot00000000000000#!/usr/bin/env python3
import platform
import setuptools
import sys

extra_compile_args = [
    '-std=c++11',
    '-O3',
    '-Wall',
    '-Wextra',
    '-Wconversion',
    '-fno-strict-aliasing',
    '-fno-rtti',
]

if platform.system() == 'Darwin':
    extra_compile_args += ['-mmacosx-version-min=10.7', '-stdlib=libc++']

if sys.version_info < (3, 0):
    raise Exception("python-rocksdb requires Python 3.x")

setuptools.setup(
    ext_modules=[
        setuptools.Extension(
            'rocksdb._rocksdb',
            [
                'rocksdb/_rocksdb.pyx',
            ],
            extra_compile_args=extra_compile_args,
            language='c++',
            libraries=[
                'rocksdb',
                'snappy',
                'bz2',
                'z',
                'lz4',
            ],
        ),
    ],
)
python-rocksdb-0.8.0~rc3/tox.ini000066400000000000000000000004611400433636700166150ustar00rootroot00000000000000[tox]
envlist = py35,py36,py37,py38,py39
minversion = 2.0
skipsdist = True

[testenv]
#skip_install = True
#deps =
#  -e
#  .[test]
commands = pytest-3 {posargs:rocksdb/tests}

[testenv:docs]
deps = .[doc]
commands = python3 setup.py build_sphinx -W

[pytest]
addopts = --verbose
norecursedirs = .tox
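For reference, the ``RecordItemsHandler`` / ``get_batch_items`` helper declared in ``rocksdb/cpp/write_batch_iter_helper.hpp`` and ``rocksdb/db.pxd`` is what backs iterating a ``WriteBatch`` from Python; ``test_write_batch_iter`` in ``rocksdb/tests/test_db.py`` asserts the resulting ``(op, key, value)`` tuples. A minimal sketch of that behaviour:

.. code-block:: python

    import rocksdb

    batch = rocksdb.WriteBatch()
    batch.put(b'key1', b'v1')
    batch.delete(b'key1')
    batch.merge(b'key2', b'v2')

    # Iterating the batch replays it through the C++ handler and yields
    # (op, key, value) tuples; deletes carry an empty value.
    assert list(batch) == [
        ('Put', b'key1', b'v1'),
        ('Delete', b'key1', b''),
        ('Merge', b'key2', b'v2'),
    ]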
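Similarly, the column-family API covered by ``TestDBColumnFamilies`` addresses every operation with a ``(ColumnFamilyHandle, key)`` tuple. A condensed sketch, again with a made-up database path:

.. code-block:: python

    import rocksdb

    db = rocksdb.DB('cf.db', rocksdb.Options(create_if_missing=True))    # made-up path
    cf_a = db.create_column_family(b'A', rocksdb.ColumnFamilyOptions())

    db.put((cf_a, b'key'), b'value')       # write into column family A
    assert db.get((cf_a, b'key')) == b'value'
    assert db.get(b'key') is None          # the default family is separate

    assert [h.name for h in db.column_families] == [b'default', b'A']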